diff --git a/solutions/cncf-generated/akri/akri-346-coap-discovery-handler.json b/solutions/cncf-generated/akri/akri-346-coap-discovery-handler.json new file mode 100644 index 00000000..c0bc2fde --- /dev/null +++ b/solutions/cncf-generated/akri/akri-346-coap-discovery-handler.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:20.439Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "akri: CoAP discovery handler", + "description": "**What this PR does / why we need it**: it adds support for the CoAP protocol, which is an important protocol for RESTful IoT devices.\n\n**Special notes for your reviewer**:\n\n**If applicable**:\n- [x] this PR contains documentation\n- [x] this PR contains unit tests\n- [ ] added code adheres to standard Rust formatting (`cargo fmt`)\n- [x] code builds properly (`cargo build`)\n- [ ] code is free of common mistakes (`cargo clippy`)\n- [ ] all Akri tests succeed (`cargo test`)\n- [ ] inline documentation builds (`cargo doc`)\n- [ ] version has been updated appropriately (`./version.sh`)", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "there are a lot of `//TODO` comments. 
i wonder if these shouldn't be converted to issues?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "akri", + "sandbox", + "app-definition", + "keep-alive", + "stale" + ], + "category": "workloads", + "cncfProjects": [ + "akri" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/project-akri/akri/pull/346", + "sourceRepo": "project-akri/akri", + "reactions": 1, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:47:20.439Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/antrea/antrea-1196-use-specific-kustomize-version-when-generating-manifests.json b/solutions/cncf-generated/antrea/antrea-1196-use-specific-kustomize-version-when-generating-manifests.json new file mode 100644 index 00000000..8aa762e9 --- /dev/null +++ b/solutions/cncf-generated/antrea/antrea-1196-use-specific-kustomize-version-when-generating-manifests.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:25.147Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "antrea: Use specific kustomize version when generating manifests", + "description": "Ensure that we use the desired version of kustomize even when there is\nalready an installation of kustomize. This is important because in\nv3.8.0, kustomize stopped using apimachinery by default and switched\nto its own library (kyaml) for K8s resource YAML manipulation. Because\nof this change, the generated YAMLs are different: fields within objects\nmay be ordered differently, and the latest kustomize generally does a\nbetter job dropping empty fields. We set the desired version to v3.8.2.\n\nNo action should be required from Antrea developers. 
The next time they\nrun `make manifest` locally, the correct version of kustomize will be\ndownloaded under `./hack/.bin`.\n\nFixes #975\nFixes #1017", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "In v3.8.0, kustomize stopped using apimachinery by default and switched\nto its own library (kyaml) for K8s resource YAML manipulation. Because\nof this change, the generated YAMLs are different: fields within objects\nmay be ordered differently, and the latest kustomize generally does a\nbetter job dropping empty fields. We are switching the min required\nversion of kustomize to 3.8.1 so that Antrea developers can keep working\nwith a recent version of kustomize without CI checks failing. Note that\nwe are using 3.8.1 and not 3.8.0 which has some known issues.\n\nFor new developers which do not have kustomize, the new version will be\ninstalled automatically when running generate-manifest.sh. Others will\nsee an error message about their version of kustomize being too old, and\nthey can update with:\n\n```\nGO111MODULE=on go get sigs.k8s.io/kustomize/kustomize/v3\n```\n\nFixes #975", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "GO111MODULE=on go get sigs.k8s.io/kustomize/kustomize/v3" + ] + } + }, + "metadata": { + "tags": [ + "antrea", + "sandbox", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "antrea" + ], + "targetResourceKinds": [ + "Job" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/antrea-io/antrea/pull/1196", + "sourceRepo": "antrea-io/antrea", + "reactions": 1, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:47:25.147Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/antrea/antrea-2121-packet-in-rate-limiting-with-of-meter.json 
b/solutions/cncf-generated/antrea/antrea-2121-packet-in-rate-limiting-with-of-meter.json new file mode 100644 index 00000000..00caaf24 --- /dev/null +++ b/solutions/cncf-generated/antrea/antrea-2121-packet-in-rate-limiting-with-of-meter.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:23.516Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "antrea: Packet-in rate limiting with OF Meter", + "description": "Add OF meter to implement rate-limiting for packet-in.\n\n1. Add meter entry while initialization.\n2. While building the flow that will trigger packet-in, except traceflow, apply meter entry to it.\n3. Update libOpenflow and ofnet version, because OF meter related commit has been merged into them.\n\nSince windows OVS doesn't support OF meter, we skip OF meter related operations for now. We contacted OVS team to ask for this support and will remove skip after windows OVS support OF meter.\n\nThe benchmarks are as below:\n1. Use `hping3 10.10.1.9 -p 80 --flood -2` and OF meter `pktps bands=type=drop rate=100`\n```\ntop - 22:28:03 up 5 days, 5:34, 1 user, load average: 0.03, 0.38, 0.55\nTasks: 157 total, 1 running, 156 sleeping, 0 stopped, 0 zombie\n%Cpu(s): 4.0 us, 5.8 sy, 0.0 ni, 60.2 id, 0.0 wa, 0.0 hi, 30.0 si, 0.0 st\nMiB Mem : 1987.6 total, 587.1 free, 472.5 used, 927.9 buff/cache\nMiB Swap: 0.0 total, 0.0 free, 0.0 used. 1333.6 avail Mem\n\n PID USER ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Add OF meter to implement rate-limiting for packet-in messages.\n\n1. Add meter entries during initialization: one for Traceflow packets\n and one for other (NetworkPolicy-related) packets.\n2. While building the flow that will trigger packet-in, use the meter\n action.\n3. 
Update libOpenflow and ofnet version, to get meter programming\n support.\n\nSince Windows OVS doesn't support OF meters, we skip OF meter related\noperations for now. On Linux, for the OVS kernel datapath, kernel\nversion 4.18 is required for meter support (should be 4.15, but is 4.18\nin practice because of an implementation bug): we add a check and\ndisable meters if the Linux kernel is not recent enough. This is to\navoid increasing the minimum kernel version requirement for Antrea, at\nleast for now.\n\nFixes #2069\n\nCo-authored-by: Antonin Bas \n\nSigned-off-by: wgrayson \nSigned-off-by: Antonin Bas ", + "steps": [ + "Add meter entries during initialization: one for Traceflow packets", + "While building the flow that will trigger packet-in, use the meter", + "Update libOpenflow and ofnet version, to get meter programming" + ], + "codeSnippets": [ + "top - 22:28:03 up 5 days, 5:34, 1 user, load average: 0.03, 0.38, 0.55\r\nTasks: 157 total, 1 running, 156 sleeping, 0 stopped, 0 zombie\r\n%Cpu(s): 4.0 us, 5.8 sy, 0.0 ni, 60.2 id, 0.0 wa, 0.0 hi, 30.0 si, 0.0 st\r\nMiB Mem : 1987.6 total, 587.1 free, 472.5 used, 927.9 buff/cache\r\nMiB Swap: 0.0 total, 0.0 free, 0.0 used. 1333.6 avail Mem\r\n\r\n PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND\r\n2022714 root 10 -10 235600 39464 12156 S 7.3 1.9 0:02.00 ovs-vswitchd\r\n 10 root 20 0 0 0 0 S 5.7 0.0 0:42.08 ksoftirqd/0\r\n2022555 root 20 0 1267928 51976 34572 S 5.0 2.6 0:02.49 antrea-agent\r\n 14079 root 20 0 1941628 72840 30968 S 2.3 3.6 164:48.40 kubelet", + "top - 22:19:06 up 5 days, 5:25, 1 user, load average: 1.48, 1.02, 0.75\r\nTasks: 147 total, 3 running, 144 sleeping, 0 stopped, 0 zombie\r\n%Cpu(s): 33.3 us, 19.1 sy, 0.0 ni, 11.1 id, 0.0 wa, 0.0 hi, 36.5 si, 0.0 st\r\nMiB Mem : 1987.6 total, 566.6 free, 480.7 used, 940.3 buff/cache\r\nMiB Swap: 0.0 total, 0.0 free, 0.0 used. 
1326.7 avail Mem\r\n\r\n PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND\r\n2010190 root 10 -10 235704 46428 12156 R 98.0 2.3 1:40.03 ovs-vswitchd\r\n2009999 root 20 0 1341660 56128 34732 R 55.4 2.8 1:02.89 antrea-agent\r\n 10 root 20 0 0 0 0 S 7.3 0.0 0:40.94 ksoftirqd/0\r\n 14079 root 20 0 1941628 73520 31496 S 3.6 3.6 164:32.39 kubelet" + ] + } + }, + "metadata": { + "tags": [ + "antrea", + "sandbox", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "antrea" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/antrea-io/antrea/pull/2121", + "sourceRepo": "antrea-io/antrea", + "reactions": 2, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:47:23.516Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/antrea/antrea-5483-do-not-delete-ipv6-link-local-route-in-reconciler.json b/solutions/cncf-generated/antrea/antrea-5483-do-not-delete-ipv6-link-local-route-in-reconciler.json new file mode 100644 index 00000000..c0c264c1 --- /dev/null +++ b/solutions/cncf-generated/antrea/antrea-5483-do-not-delete-ipv6-link-local-route-in-reconciler.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:26.872Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "antrea: Do not delete IPv6 link-local route in reconciler", + "description": "In the existing code, the IPv6 link-local route on antrea-gw0 is deleted in\nroute reconcile, which results in the IPv6 Neighbor Solicitation sent from Pod's\nlink-local address is dropped on the Node by kenel reverse path filtering, and\nPod would mark the antrea-gw0 as a \"FAILED\" neighbor. Then the Pod's accross\nNode traffic or the Pod-to-external traffic does not work as expected.\n \nThis change includes,\n1. Do not delete IPv6 link-local routes in the reconcile function,\n2. 
Restore IPv6 link-local route in syncRoute function.\n\nFix: #5482", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Cherry pick of #5483 on release-1.13.\n\n#5483: Do not delete IPv6 link-local route in reconciler\n\nFor details on the cherry pick process, see the [cherry pick requests](https://github.com/antrea-io/antrea/blob/main/docs/contributors/cherry-picks.md) page.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "antrea", + "sandbox", + "networking", + "area-transit-ipv6", + "action-backport", + "action-release-note" + ], + "category": "networking", + "cncfProjects": [ + "antrea" + ], + "targetResourceKinds": [ + "Pod", + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/antrea-io/antrea/pull/5483", + "sourceRepo": "antrea-io/antrea", + "reactions": 1, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:47:26.872Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-10092-feat-applicationset-reuse-repo-creds-for-an-existing-github-app.json b/solutions/cncf-generated/argo/argo-10092-feat-applicationset-reuse-repo-creds-for-an-existing-github-app.json new file mode 100644 index 00000000..a5418c6b --- /dev/null +++ b/solutions/cncf-generated/argo/argo-10092-feat-applicationset-reuse-repo-creds-for-an-existing-github-app.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:47.497Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "argo: feat(applicationset): reuse repo-creds for an existing GitHub App", + "description": "Closes #10079", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@iamnoah makes sense. 
Did you find that there's something acting as a hard barrier to auto-discovery, or simply that it was too involved to tackle in this PR?\n\nJust curious, in case someone decides to pick up auto-discovery in the future.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/argoproj/argo-cd/pull/10092", + "sourceRepo": "argoproj/argo-cd", + "reactions": 19, + "comments": 7 + }, + "security": { + "scannedAt": "2026-02-27T17:43:47.497Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-10432-feat-multiple-sources-for-applications.json b/solutions/cncf-generated/argo/argo-10432-feat-multiple-sources-for-applications.json new file mode 100644 index 00000000..98ff855c --- /dev/null +++ b/solutions/cncf-generated/argo/argo-10432-feat-multiple-sources-for-applications.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:21.339Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "argo: feat: Multiple sources for applications", + "description": "This change enables users to provide multiple resources for the Application. The change aims to be fully backwards compatible.\n\nThis PR implements proposal https://github.com/argoproj/argo-cd/pull/8322.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This is just a draft PR to indicate that the work has started on the proposal implementation. \n**The PR is not ready for review.**\n\nNote on DCO:\n\nIf the DCO action in the integration test fails, one or more of your commits are not signed off. 
Please click on the *Details* link next to the DCO action for instructions on how to resolve this.\n\nChecklist:\n\n* [ ] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.\n* [ ] The title of the PR states what changed and the related issues number (used for the release note).\n* [ ] I've included \"Closes [ISSUE #]\" or \"Fixes [ISSUE #]\" in the description to automatically close the associated issue.\n* [ ] I've updated both the CLI and UI to expose my feature, or I plan to submit a second PR with them.\n* [ ] Does this PR require documentation updates?\n* [ ] I've updated documentation as required by this PR.\n* [ ] Optional. My organization is added to USERS.md.\n* [ ] I have signed off all my commits as required by [DCO](https://github.com/argoproj/argoproj/tree/master/community#contributing-to-argo)\n* [ ] I have written unit and/or e2e tests for my change. 
PRs without these are unlikely to be merged.\n* [ ] My build is green ([troubleshooting builds](https://argo-cd.readthedocs.io/en/latest/developer-guide/ci/)).", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "spec:\r\n source:\r\n repoURL: https://github.com/argoproj/argocd-example-apps.git\r\n path: guestbook\r\n targetRevision: HEAD\r\n sources:\r\n - chart: elasticsearch\r\n helm:\r\n valueFiles:\r\n - values.yaml\r\n repoURL: https://helm.elastic.co\r\n targetRevision: 7.6.0\r\n - repoURL: https://github.com/argoproj/argocd-example-apps.git\r\n path: guestbook\r\n targetRevision: HEAD", + "spec:\r\n sources:\r\n - repoURL: https://github.com/my-org/my-repo # path is missing so no manifests are generated\r\n targetRevision: master\r\n ref: myRepo # repo is available via symlink \"myRepo\"\r\n - repoURL: https://github.com/helm/charts\r\n targetRevision: master\r\n path: incubator/elasticsearch # path \"incubator/elasticsearch\" is used to generate manifests\r\n helm:\r\n valueFiles:\r\n - $myRepo/values.yaml # values.yaml is located in source with reference name $myRepo", + "apiVersion: argoproj.io/v1alpha1\r\nkind: Application\r\nmetadata:\r\n name: guestbook\r\n namespace: argocd\r\n labels:\r\n argocd.argoproj.io/refresh: hard\r\nspec:\r\n project: default\r\n syncPolicy:\r\n automated:\r\n prune: true\r\n selfHeal: true\r\n destination:\r\n server: https://kubernetes.default.svc\r\n namespace: argocd\r\n sources:\r\n - chart: elasticsearch\r\n helm:\r\n valueFiles:\r\n - values.yaml\r\n repoURL: https://helm.elastic.co\r\n targetRevision: 7.6.0\r\n - repoURL: https://github.com/argoproj/argocd-example-apps.git\r\n path: guestbook\r\n targetRevision: HEAD\r\n - chart: elasticsearch\r\n helm:\r\n valueFiles:\r\n - values.yaml\r\n repoURL: https://helm.elastic.co\r\n targetRevision: 7.7.0" + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition" + 
], + "category": "workloads", + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [ + "Namespace" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/argoproj/argo-cd/pull/10432", + "sourceRepo": "argoproj/argo-cd", + "reactions": 147, + "comments": 85 + }, + "security": { + "scannedAt": "2026-02-27T17:43:21.340Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-11183-feat-appset-add-stringtemplate-field-to-spec.json b/solutions/cncf-generated/argo/argo-11183-feat-appset-add-stringtemplate-field-to-spec.json new file mode 100644 index 00000000..c70b0dd3 --- /dev/null +++ b/solutions/cncf-generated/argo/argo-11183-feat-appset-add-stringtemplate-field-to-spec.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:41.108Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "argo: feat(appset): Add stringTemplate field to spec", + "description": "Closes: #11213\n\nThis will introduce a new field `stringTemplate` that will allow a less restrictive templating of the Application to be generated as the current design is limited by field.\n\nRelated discussions:\n- https://github.com/argoproj/argo-cd/pull/10026#issuecomment-1236888623\n\n*This work was started in https://github.com/argoproj/argo-cd/pull/9873*\n\nNote on DCO:\n\nIf the DCO action in the integration test fails, one or more of your commits are not signed off. 
Please click on the *Details* link next to the DCO action for instructions on how to resolve this.\n\nChecklist:\n\n* [ ] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.\n* [ ] The title of the PR states what changed and the related issues number (used for the release note).\n* [ ] I've included \"Closes [ISSUE #]\" or \"Fixes [ISSUE #]\" in the description to automa", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Signed-off-by: rishabh625 \n\nNote on DCO:\n\nIf the DCO action in the integration test fails, one or more of your commits are not signed off. Please click on the *Details* link next to the DCO action for instructions on how to resolve this.\n\nChecklist:\n\n* [ ] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.\n* [ ] The title of the PR states what changed and the related issues number (used for the release note).\n* [ ] I've included \"Closes [ISSUE #]\" or \"Fixes [ISSUE #]\" in the description to automatically close the associated issue.\n* [ ] I've updated both the CLI and UI to expose my feature, or I plan to submit a second PR with them.\n* [ ] Does this PR require documentation updates?\n* [ ] I've updated documentation as required by this PR.\n* [ ] Optional. My organization is added to USERS.md.\n* [ ] I have signed off all my commits as required by [DCO](https://github.com/argoproj/argoproj/tree/master/community#contributing-to-argo)\n* [ ] I have written unit and/or e2e tests for my change. PRs without these are unlikely to be merged.\n* [ ] My build is green ([troubleshooting builds](https://argo-cd.readthedocs.io/en/latest/developer-guide/ci/)). 
\n\nThis is the PR moved from argoproj/applicationset \n\nhttps://github.com/argoproj/applicationset/pull/513\n\nThanks to @vavdoshka for all the effo", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Hello @speedfl, thanks for the prompt reply,\r\n\r\n> @mrmm are you sure you want to create the method `renderWithGoTemplate`. Can the `Replace` in utils do the job ?\r\n\r\nYes totally, I have just tried to use the work of @rishabh625 (as it was his idea) but using `r.Replace` does the job (which I have updated in b91759c)\r\n\r\n> Concerning your tests as you are passing the `stringTemplate` you need to add test with `stringTemplate != nil` in the `utils_test.go`\r\n\r\nThanks for the pointer on how to test, here is my try https://github.com/mrmm/argo-cd/blob/437a9a82fc40e857ec4373297d7a521684bbdc87/applicationset/utils/utils_test.go#L19 *~but I seem to be running into an Unmarshaling issue for some reason I couldn't find.~*\r\n\r\n\r\n(Please let me know if you don't have the time to check this I will gladly stop pinging here 🙏 )\r\n\r\n-----\r\n\r\n>", + "*Edit: found my unmarshal mistake in https://github.com/argoproj/argo-cd/pull/11183/commits/437a9a82fc40e857ec4373297d7a521684bbdc87*\n# [Codecov](https://codecov.io/gh/argoproj/argo-cd/pull/11183?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj) Report\nBase: **47.00**% // Head: **46.95**% // Decreases project coverage by **`-0.05%`** :warning:\n> Coverage data is based on head [(`fc26e2e`)](https://codecov.io/gh/argoproj/argo-cd/pull/11183?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj) compared to base 
[(`9fe4ad3`)](https://codecov.io/gh/argoproj/argo-cd/commit/9fe4ad3253840761e418e7c76e85ef090ffa73fd?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj).\n> Patch coverage: 58.82% of modified lines in pull request are covered.\n\n> :exclamation: Current head fc26e2e differs from pull request most recent head 21bbdcc. Consider uploading reports for the commit 21bbdcc to get more accurate results\n\n
Additional details and impacted files" + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/argoproj/argo-cd/pull/11183", + "sourceRepo": "argoproj/argo-cd", + "reactions": 24, + "comments": 16 + }, + "security": { + "scannedAt": "2026-02-27T17:43:41.108Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-11567-feat-appset-advanced-templating-for-applicationset.json b/solutions/cncf-generated/argo/argo-11567-feat-appset-advanced-templating-for-applicationset.json new file mode 100644 index 00000000..6200d373 --- /dev/null +++ b/solutions/cncf-generated/argo/argo-11567-feat-appset-advanced-templating-for-applicationset.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:36.853Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "argo: feat(appset): Advanced templating for ApplicationSet", + "description": "Signed-off-by: gmuselli \n\n@crenshaw-dev I think you were interested by this one\n\nCloses #11164\nCloses https://github.com/argoproj/argo-cd/issues/9177\n\nMain purpose is to have an ApplicationSet Template as `map[string]interface{}` which can be fully templatable\n\n```yaml\napiVersion: argoproj.io/v1alpha1\nkind: ApplicationSet\nmetadata:\n name: guestbook\nspec:\n goTemplate: true\n generators:\n - list:\n elements:\n - cluster: engineering-dev\n url: https://kubernetes.default.svc\n automated: true\n prune: true\n - cluster: engineering-prod\n url: https://kubernetes.default.svc\n automated: true\n prune: false\n - cluster: engineering-debug\n url: https://kubernetes.default.svc\n automated: false\n prune: false\n template:\n metadata:\n name: '{{.cluster}}'\n spec:\n project: default\n 
source:\n repoURL: https://github.com/argoproj/argo-cd.git\n targetRevision: HEAD\n", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@speedfl you're a force of nature.\n\nI doubt I'll personally have time to review this in time for 2.6. But if the community likes the approach, I'd love to get it into 2.7.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "apiVersion: argoproj.io/v1alpha1\r\nkind: ApplicationSet\r\nmetadata:\r\n name: guestbook\r\nspec:\r\n goTemplate: true\r\n generators:\r\n - list:\r\n elements:\r\n - cluster: engineering-dev\r\n url: https://kubernetes.default.svc\r\n automated: true\r\n prune: true\r\n - cluster: engineering-prod\r\n url: https://kubernetes.default.svc\r\n automated: true\r\n prune: false\r\n - cluster: engineering-debug\r\n url: https://kubernetes.default.svc\r\n automated: false\r\n prune: false\r\n template:\r\n metadata:\r\n name: '{{.cluster}}'\r\n spec:\r\n project: default\r\n source:\r\n repoURL: https://github.com/argoproj/argo-cd.git\r\n targetRevision: HEAD\r\n path: applicationset/examples/list-generator/guestbook/{{.cluster}}\r\n destination:\r\n server: '{{.url}}'\r\n namespace: guestbook\r\n syncPolicy:\r\n # If automated == true, it will generate a key 'automated' which is part of the Application Spec model. It will then be retained\r\n # If automated == false, it will generate a key 'noAuto' which is not part of the Application Spec model. It will then be ignored\r\n '{{ ternary \"automated\" \"noAuto\" .automated }}':\r\n # If prune == true, it will generate a key 'prune' which is part of the Application Spec model. It will then be retained\r\n # If prune == false, it will generate a key 'noprune' which is not part of the Application Spec model. 
It will then be ignored\r\n '{{ ternary \"prune\" \"noprune\" .prune }}': true", + "| [Impacted Files](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj) | Coverage Δ | |\n|---|---|---|\n| [applicationset/generators/duck\\_type.go](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-YXBwbGljYXRpb25zZXQvZ2VuZXJhdG9ycy9kdWNrX3R5cGUuZ28=) | `70.18% <0.00%> (ø)` | |\n| [applicationset/generators/pull\\_request.go](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-YXBwbGljYXRpb25zZXQvZ2VuZXJhdG9ycy9wdWxsX3JlcXVlc3QuZ28=) | `52.23% <0.00%> (ø)` | |\n| [applicationset/generators/scm\\_provider.go](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-YXBwbGljYXRpb25zZXQvZ2VuZXJhdG9ycy9zY21fcHJvdmlkZXIuZ28=) | `34.19% <0.00%> (ø)` | |\n| [.../apis/application/v1alpha1/applicationset\\_types.go](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-cGtnL2FwaXMvYXBwbGljYXRpb24vdjFhbHBoYTEvYXBwbGljYXRpb25zZXRfdHlwZXMuZ28=) | `29.26% <ø> (+0.69%)` | :arrow_up: |\n| [util/argo/argo.go](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-dXRpbC9hcmdvL2FyZ28uZ28=) | `64.40% <0.00%> (-2.06%)` | :arrow_down: |\n| 
[cmd/argocd/commands/applicationset.go](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-Y21kL2FyZ29jZC9jb21tYW5kcy9hcHBsaWNhdGlvbnNldC5nbw==) | `18.25% <33.66%> (+1.19%)` | :arrow_up: |\n| [...licationset/generators/generator\\_spec\\_processor.go](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-YXBwbGljYXRpb25zZXQvZ2VuZXJhdG9ycy9nZW5lcmF0b3Jfc3BlY19wcm9jZXNzb3IuZ28=) | `64.00% <60.00%> (-2.30%)` | :arrow_down: |\n| [applicationset/utils/utils.go](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-YXBwbGljYXRpb25zZXQvdXRpbHMvdXRpbHMuZ28=) | `61.99% <60.60%> (-13.60%)` | :arrow_down: |\n| [...cationset/controllers/applicationset\\_controller.go](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-YXBwbGljYXRpb25zZXQvY29udHJvbGxlcnMvYXBwbGljYXRpb25zZXRfY29udHJvbGxlci5nbw==) | `63.15% <100.00%> (-0.29%)` | :arrow_down: |\n| [applicationset/generators/cluster.go](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-YXBwbGljYXRpb25zZXQvZ2VuZXJhdG9ycy9jbHVzdGVyLmdv) | `80.27% <100.00%> (ø)` | |\n| ... and [4 more](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree-more&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj) | |\n\n\n
\n\n[:umbrella: View full report in Codecov by Sentry](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj). \n:loudspeaker: Do you have feedback about the report comment? [Let us know in this issue](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj).\n\n@speedfl you're a force of nature.\r\n\r\nI doubt I'll personally have time to review this in time for 2.6. But if the community likes the approach, I'd love to get it into 2.7.\n@crenshaw-dev can you explain how can we as community motivate you more to \"merge it asap as this is what we need\"? 😄\nInteresting approach. I think this is exactly what I need. Would we be to toggle `automated` by using only a single variable as in the example bellow?", + "@boedy yes. You Can take a look to examples and e2e\napologies if this repeating the same question, but can parameter values provided by various generators be used as a condition here, e.g labels provided by cluster generator?" 
+ ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [ + "Namespace" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/argoproj/argo-cd/pull/11567", + "sourceRepo": "argoproj/argo-cd", + "reactions": 31, + "comments": 23 + }, + "security": { + "scannedAt": "2026-02-27T17:43:36.853Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-12508-feat-combine-files-in-a-multisource-repo-12471-12485.json b/solutions/cncf-generated/argo/argo-12508-feat-combine-files-in-a-multisource-repo-12471-12485.json new file mode 100644 index 00000000..f56b1d44 --- /dev/null +++ b/solutions/cncf-generated/argo/argo-12508-feat-combine-files-in-a-multisource-repo-12471-12485.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:25.855Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "argo: feat: Combine files in a multisource repo (#12471 #12485)", + "description": "Fixes #12471\nFixes #12476\nFixes #7189\nFixes #13220\nFixes #14521\n\nNote on DCO:\n\nIf the DCO action in the integration test fails, one or more of your commits are not signed off. 
Please click on the *Details* link next to the DCO action for instructions on how to resolve this.\n\nChecklist:\n\n* [x] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.\n* [x] The title of the PR states what changed and the related issues number (used for the release note).\n* [x] I've included \"Closes [ISSUE #]\" or \"Fixes [ISSUE #]\" in the description to automatically close the associated issue.\n* [ ] I've updated both the CLI and UI to expose my feature, or I plan to submit a second PR with them.\n* [x] Does this PR require documentation updates?\n* [x] I've updated documentation as required by this PR.\n* [x] Optional. My organization is added to USE", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Note on DCO:\n\nIf the DCO action in the integration test fails, one or more of your commits are not signed off. Please click on the *Details* link next to the DCO action for instructions on how to resolve this.\n\nChecklist:\n\n* [x] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.\n* [x] The title of the PR states what changed and the related issues number (used for the release note).\n* [ ] I've included \"Closes [ISSUE #]\" or \"Fixes [ISSUE #]\" in the description to automatically close the associated issue.\n* [ ] I've updated both the CLI and UI to expose my feature, or I plan to submit a second PR with them.\n* [ ] Does this PR require documentation updates?\n* [ ] I've updated documentation as required by this PR.\n* [x] Optional. 
My organization is added to USERS.md.\n* [x] I have signed off all my commits as required by [DCO](https://github.com/argoproj/argoproj/tree/master/community#contributing-to-argo)\n* [ ] I have written unit and/or e2e tests for my change. PRs without these are unlikely to be merged.\n* [ ] My build is green ([troubleshooting builds](https://argo-cd.readthedocs.io/en/latest/developer-guide/ci/)).", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "\n\n[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/argoproj/argo-cd/pull/12508?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj). \n:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj).\n\nThere's a lot of code duplicated from `repository.go:679` (valueFiles logic) to the new copyFrom logic. I think these should be refactored to call common functions. Also, they way that both parts are checking out repos and deferring to closer functions for cleanup individually is something I don't like the idea of. 
Probably repos should be checked out once, cached, and when it's time for the manifest generation released and cleaned up.\r\n\r\nI think once the copyFrom logic is there and I both have a clearer view of how stuff works and I'm more comfortable with the logics here, I will see how can I clean this mess up.\r\n\r\nAnd I'm also planning to address the LintGo failing test later.\nI've got the following errors while running `test-local`:", + "And the String operator for ApplicationSources is defined at `pkg/apis/application/v1alpha1/generated.pb.go:16086`:", + "IIRC I haven't fiddled with this part, and I'm not really sure what might have caused those test errors.\r\n\nI think the PR looks feature-complete right now, unit tests are also passing. The place I couldn't find the tests for are the changes in the `reposerver/repository/repository.go:runManifestGenAsync` function, that's where the logic responsible for this change is called. Everything else should be covered by the unit tests.\r\n\r\nDocs are still to be adjusted.\r\n\r\n\nDocs are updated as well.\r\n\r\n@ishitasequeira, could you please review it?\r\n\n> Docs are updated as well.\r\n> \r\n> @ishitasequeira, could you please review it?\r\n\r\nWill review it today.\nThis is looking really cool!\r\n\r\nWe'll probably want some e2e tests to validate a few base use cases for this feature.\nOne thought that comes to my mind is, are we allowing a nested access of referenced sources for `from`? 
For example, something like below:" + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/argoproj/argo-cd/pull/12508", + "sourceRepo": "argoproj/argo-cd", + "reactions": 65, + "comments": 102 + }, + "security": { + "scannedAt": "2026-02-27T17:43:25.855Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-13912-feat-add-ignoreresourceupdates-to-reduce-controller-cpu-usage-13534.json b/solutions/cncf-generated/argo/argo-13912-feat-add-ignoreresourceupdates-to-reduce-controller-cpu-usage-13534.json new file mode 100644 index 00000000..8f1e44d5 --- /dev/null +++ b/solutions/cncf-generated/argo/argo-13912-feat-add-ignoreresourceupdates-to-reduce-controller-cpu-usage-13534.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:42.229Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "argo: feat: add `ignoreResourceUpdates` to reduce controller CPU usage (#13534)", + "description": "Closes #13534 https://github.com/argoproj/argo-cd/issues/6108 https://github.com/argoproj/argo-cd/issues/13614 https://github.com/argoproj/argo-cd/issues/8471 https://github.com/argoproj/argo-cd/issues/8100 https://github.com/argoproj/argo-cd/issues/7406 https://github.com/argoproj/argo-cd/issues/9014 https://github.com/argoproj/argo-cd/issues/9819\n\nChanges:\n- Adding `ignoreResourceUpdates` global configuration to ignore fields before to hash resources.\n- Adding `ignoreDifferencesOnResourceUpdates` config to use ignoreDifferences automatically to `ignoreResourceUpdates`.\n- Hashing resources **directly** part of an application.\n- Filtering out resource updates when hash is the same\n- Changed `Refreshing app %s for change in 
cluster of object %s of type %s/%s` debug log to info to help get statistics and configure `ignoreResourceUpdates`.\n - For Splunk, `msg=\"Refreshing app*for change*\" | rex field=msg \"Refreshing app (?\\S+) for change in cluster of object (?\\S+) ", + "type": "analyze", + "status": "completed", + "resolution": { + "summary": "@jaideepr97 \nIf your question is related to what is the difference between both: The ignoreDifferences are used to evaluate during the reconcile/refresh of the application if the app should be synchronized or not. Reconcile is the operation consuming the CPU, not sync. So we need to avoid performing the reconcile on watched resources. Take a look at https://github.com/argoproj/argo-cd/issues/13534 for more details.\n\nIf your question is related to why there are 2 different settings and ignoreDifferences could not be reused to skip the reconcile as well: In our case, ignore difference has more configuration than what is necessary for the reconcile optimization. It is also hard/impossible to know what everyone has configured. Having 2 configurations prevents the possibility of conflicts. However, `ignoreDifferencesOnResourceUpdates` is there to simplify the configuration and prevent duplicates, especially for people that do not already ignore differences in the `/status` field of all objects. Another thing is that ignoreDifferences is only applied to resources defined by the user in git, while ignoreResourceUpdates could/should ideally be applied on any resources affecting the application.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "resource.customizations.ignoreResourceUpdates.all: |\r\n jsonPointers:\r\n - /status", + "One \"glitch\" that I am seeing is when a ReplicaSet is scaled down (by HPA). When a pod is set to terminating, its health turns to \"progressing\", the Application health also changes to \"progressing\". 
\r\n\r\nHowever, when the pods \"disappear\" from the UI, the Application status is not updated and stays \"Progressing\".\r\n\r\nI expect the `OnResourceUpdated` callback to be called when a resource is deleted based on the code with newRes as `nil` and the reconcile to always happens, so I am unsure where the problem can be.\r\n\r\nThe app status is the following, but no resources in `status.resources` are progressing.", + "@agaudreault-jive thanks to you and your company for this significant contribution to improving performance \r\n@crenshaw-dev, as always, the exemplar maintainer taking time to review and discuss the PR \r\n🎉\r\n\nI Added to my config an additional section:" + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/argoproj/argo-cd/pull/13912", + "sourceRepo": "argoproj/argo-cd", + "reactions": 24, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:43:42.229Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-14124-feat-application-controller-add-support-for-rollback-multi-source-app.json b/solutions/cncf-generated/argo/argo-14124-feat-application-controller-add-support-for-rollback-multi-source-app.json new file mode 100644 index 00000000..5bb007d3 --- /dev/null +++ b/solutions/cncf-generated/argo/argo-14124-feat-application-controller-add-support-for-rollback-multi-source-app.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:23.734Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "argo: feat(application-controller): Add support for rollback multi-source applications", + "description": "Signed-off-by: Jorge Turrado \n\nCurrently, ArgoCD support multi-source 
applications and that's a really nice feature, but using multi source, end users have to sacrify the option of executing fast rollbacks because the rollbacks are not supported for multi-source applications. \nThis PR adds the support for rollback multi-source applications (as single source applications do)\n\nNote on DCO:\n\nIf the DCO action in the integration test fails, one or more of your commits are not signed off. Please click on the *Details* link next to the DCO action for instructions on how to resolve this.\n\nChecklist:\n\n* [x] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.\n* [x] The title of the PR states what changed and the related issues number (used for the release note).\n* [x] The title of the PR conforms to the [T", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Thanks for the PR! 
We're past feature freeze for 2.8, so I'll queue this up for the 2.9 roadmap.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/argoproj/argo-cd/pull/14124", + "sourceRepo": "argoproj/argo-cd", + "reactions": 81, + "comments": 66 + }, + "security": { + "scannedAt": "2026-02-27T17:43:23.734Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-14208-feat-allow-multiple-extenal-urls-for-sso-access.json b/solutions/cncf-generated/argo/argo-14208-feat-allow-multiple-extenal-urls-for-sso-access.json new file mode 100644 index 00000000..fefb1c80 --- /dev/null +++ b/solutions/cncf-generated/argo/argo-14208-feat-allow-multiple-extenal-urls-for-sso-access.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:33.431Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "argo: feat: Allow multiple extenal URLs for SSO access", + "description": "With #4780, were introduced security measures to ensure the `return_url` is pointing to the current ArgoCD instance.\n\nIn several occasions, an ArgoCD isntance could be exposed through multiple network connections. 
Internal addresses and restricted public addresses.\n\nCurrently, a single base URL can be configured in the the argocd configmap, preventing from exposing ArgoCD on several access paths.\n\nThis change allows to define multiple hosts on which ArgoCD can be exposed, keeping backward compatibility by adding a field `additionalUrls` accepting a list of additional URLS on which argoCD can be exposed\n\nFixes #5388\n\nNote on DCO:\n\nIf the DCO action in the integration test fails, one or more of your commits are not signed off. Please click on the *Details* link next to the DCO action for instructions on how to resolve this.\n\nChecklist:\n\n* [ ] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) t", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This is very useful PR @tjamet 👍", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition", + "lifecycle-rotten" + ], + "category": "workloads", + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [ + "Configmap" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/argoproj/argo-cd/pull/14208", + "sourceRepo": "argoproj/argo-cd", + "reactions": 36, + "comments": 17 + }, + "security": { + "scannedAt": "2026-02-27T17:43:33.431Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-14893-feat-appset-advanced-templating-using-templatepatch.json b/solutions/cncf-generated/argo/argo-14893-feat-appset-advanced-templating-using-templatepatch.json new file mode 100644 index 00000000..6c530a51 --- /dev/null +++ b/solutions/cncf-generated/argo/argo-14893-feat-appset-advanced-templating-using-templatepatch.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + 
"exportedAt": "2026-02-27T17:43:27.675Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "argo: feat(appset): Advanced Templating using templatePatch", + "description": "Signed-off-by: gmuselli \n\n@crenshaw-dev a small proposal for `patchTemplate`\n\nCloses #11164\nCloses https://github.com/argoproj/argo-cd/issues/9177\n\nNote on DCO:\n\nIf the DCO action in the integration test fails, one or more of your commits are not signed off. Please click on the *Details* link next to the DCO action for instructions on how to resolve this.\n\nChecklist:\n\n* [x] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.\n* [x] The title of the PR states what changed and the related issues number (used for the release note).\n* [x] I've included \"Closes [ISSUE #]\" or \"Fixes [ISSUE #]\" in the description to automatically close the associated issue.\n* [x] I've updated both the CLI and UI to expose my feature, or I plan to submit a second PR with them.\n* [x] Does this PR require documentation up", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Signed-off-by: gmuselli \n\n@crenshaw-dev I think you were interested by this one\n\nCloses #11164\nCloses https://github.com/argoproj/argo-cd/issues/9177\n\nMain purpose is to have an ApplicationSet Template as `map[string]interface{}` which can be fully templatable\n\n```yaml\napiVersion: argoproj.io/v1alpha1\nkind: ApplicationSet\nmetadata:\n name: guestbook\nspec:\n goTemplate: true\n generators:\n - list:\n elements:\n - cluster: engineering-dev\n url: https://kubernetes.default.svc\n automated: true\n prune: true\n - cluster: engineering-prod\n url: https://kubernetes.default.svc\n automated: true\n prune: false\n - cluster: engineering-debug\n url: 
https://kubernetes.default.svc\n automated: false\n prune: false\n template:\n metadata:\n name: '{{.cluster}}'\n spec:\n project: default\n source:\n repoURL: https://github.com/argoproj/argo-cd.git\n targetRevision: HEAD\n path: applicationset/examples/list-generator/guestbook/{{.cluster}}\n destination:\n server: '{{.url}}'\n namespace: guestbook\n syncPolicy:\n # If automated == true, it will generate a key 'automated' which is part of the Application Spec model. It will then be retained\n # If automated == false, it will generate a key 'noAuto' which is not part of the Application Spec model. It will then be ignored\n '{{ ternary \"automated\" \"noAuto\" .automated }}':", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "apiVersion: argoproj.io/v1alpha1\r\nkind: ApplicationSet\r\nmetadata:\r\n name: guestbook\r\nspec:\r\n goTemplate: true\r\n generators:\r\n - list:\r\n elements:\r\n - cluster: engineering-dev\r\n url: https://kubernetes.default.svc\r\n automated: true\r\n prune: true\r\n - cluster: engineering-prod\r\n url: https://kubernetes.default.svc\r\n automated: true\r\n prune: false\r\n - cluster: engineering-debug\r\n url: https://kubernetes.default.svc\r\n automated: false\r\n prune: false\r\n template:\r\n metadata:\r\n name: '{{.cluster}}'\r\n spec:\r\n project: default\r\n source:\r\n repoURL: https://github.com/argoproj/argo-cd.git\r\n targetRevision: HEAD\r\n path: applicationset/examples/list-generator/guestbook/{{.cluster}}\r\n destination:\r\n server: '{{.url}}'\r\n namespace: guestbook\r\n syncPolicy:\r\n # If automated == true, it will generate a key 'automated' which is part of the Application Spec model. It will then be retained\r\n # If automated == false, it will generate a key 'noAuto' which is not part of the Application Spec model. 
It will then be ignored\r\n '{{ ternary \"automated\" \"noAuto\" .automated }}':\r\n # If prune == true, it will generate a key 'prune' which is part of the Application Spec model. It will then be retained\r\n # If prune == false, it will generate a key 'noprune' which is not part of the Application Spec model. It will then be ignored\r\n '{{ ternary \"prune\" \"noprune\" .prune }}': true", + "\n\n[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/argoproj/argo-cd/pull/14893?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj). \n:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj).\n\nFor folks watching this issue: I really, really wanted to get it into 2.9 but ran out of time. We did manage to merge this slightly less-powerful tool, which should be sufficient for some use cases: https://github.com/argoproj/argo-cd/pull/14743\n@crenshaw-dev #14743 will save a lot of time. Thanks a lot. 
In parallel I will rebase this one and keep it ready for 2.10\nHiya, I am having a problem where if `goTemplate: true` is in there my second generator can't get values from my first.", + "The above works fine without `goTemplate: true` but with it I get" + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/argoproj/argo-cd/pull/14893", + "sourceRepo": "argoproj/argo-cd", + "reactions": 55, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:43:27.676Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-15603-feat-auto-refresh-on-new-revisions-during-sync-retries-alpha-11494.json b/solutions/cncf-generated/argo/argo-15603-feat-auto-refresh-on-new-revisions-during-sync-retries-alpha-11494.json new file mode 100644 index 00000000..0889408a --- /dev/null +++ b/solutions/cncf-generated/argo/argo-15603-feat-auto-refresh-on-new-revisions-during-sync-retries-alpha-11494.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:34.573Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "argo: feat: auto-refresh on new revisions during sync retries (Alpha) (#11494)", + "description": "This feature allows users to configure their apps to refresh on new revisions, when the current sync is retrying.\nThis is controlled by a new boolean Application CRD field `syncPolicy.retry.refresh` or via the `--sync-retry-refresh` flag.\n\nCloses https://github.com/argoproj/argo-cd/issues/11494\nRelated to https://github.com/argoproj/argo-cd/issues/6055\nDiscussed at https://www.youtube.com/watch?v=baIX9Bk6f5w&t=1173s \n\nInitially based of the work Sayrus did at 
https://github.com/Sayrus/argo-cd/commit/817bc3449768021d0d5ad7f1ce7510bcd9d2f486\n\nChecklist:\n\n* [x] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.\n* [x] The title of the PR states what changed and the related issues number (used for the release note).\n* [x] The title of the PR conforms to the [Toolchain Guide](https://argo-cd.readthedocs.io/en/latest/developer-", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Hey @aslafy-z , thank you for this PR, really amazing feature. \nI have tried to test it , and it not really works for me, \n\nI have guestbook application with simple PreSync hook \n\n```apiVersion: batch/v1\nkind: Job\nmetadata:\n name: before\n annotations:\n argocd.argoproj.io/hook: PreSync\n argocd.argoproj.io/hook-delete-policy: BeforeHookCreation\nspec:\n template:\n spec:\n containers:\n - name: sleep\n image: alpine:latest\n command: [\"sleep\", \"30\", \"exit\", \"1\"]\n restartPolicy: Never\n backoffLimit: 0\n```\n\nAnd once i am doing new commit, i am almost immediately getting such error\n\n\"Снимок", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "\n\n[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/argoproj/argo-cd/pull/15603?dropdown=coverage&src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj). \n:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj).\n@alexec any inputs?\nHi there, Zadkiel here. 
I originally signed up to talk about this topic in the contributor meeting but finally could not make it.\nHere is the recording: https://youtu.be/tJYxtn3cO6E?t=1087\nThis is a very annoying missing feature on the ArgoCD side that in my opinion breaks the experience of auto-sync.\n\nPlease give feedback so we can iterate on this topic! 🙏\nHey @aslafy-z , thank you for this PR, really amazing feature. \r\nI have tried to test it , and it not really works for me, \r\n\r\nI have guestbook application with simple PreSync hook", + "And once i am doing new commit, i am almost immediately getting such error\r\n\r\n\"Снимок\r\n\r\n\n@pasha-codefresh Thank you for giving attention to this PR. I reported in the PR comments that it was not triggering refresh when processing hooks: https://github.com/argoproj/argo-cd/pull/15603#discussion_r1340694530\r\nI'm not sure how to resolve it, and would like some experimented contributor or maintainer to help here.\nLooking forward to seeing this merged, even if it doesn't cover all the use cases :-)\nSorry @aslafy-z , missed it! \n@aslafy-z PR looks good to me, could you please add documentation about this feature?\r\n\r\nhttps://argo-cd.readthedocs.io/en/stable/user-guide/auto_sync/\n@todaywasawesome I applied the required change.\r\n@pasha-codefresh I added a sparse documentation for the feature, let me know if it's OK to you.\r\n\r\nI also added the `argocd app set --sync-retry-refresh` flag.\nI will fix the tests asap\n@aslafy-z any news?\nhey @aslafy-z , are you going to work on it?\nHi @pasha-codefresh,\r\n\r\nI couldn't reproduce the issue correctly in the end-to-end tests. I'm quite short on time these days. Please feel free to take over if needed, and any suggestions are welcome.\r\n\n> Hi @pasha-codefresh,\r\n> \r\n> I couldn't reproduce the issue correctly in the end-to-end tests. I'm quite short on time these days. 
Please feel free to take over if needed, and any suggestions are welcome.\r\n\r\nI think your test case shouldnt wait for `Failed` state because patched selector will not be applied and you will stuck at `Running` state." + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/argoproj/argo-cd/pull/15603", + "sourceRepo": "argoproj/argo-cd", + "reactions": 33, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:43:34.573Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-16602-fix-app-fix-patch-creation-for-applications-with-valuesobject-fields-.json b/solutions/cncf-generated/argo/argo-16602-fix-app-fix-patch-creation-for-applications-with-valuesobject-fields-.json new file mode 100644 index 00000000..1a30c6ab --- /dev/null +++ b/solutions/cncf-generated/argo/argo-16602-fix-app-fix-patch-creation-for-applications-with-valuesobject-fields-.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:29.487Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "argo: fix(app): Fix patch creation for Applications with ValuesObject fields #15126", + "description": "Fix patch creation for Applications using ValuesObject fields, and add multiple tests to validate that the fix works as expected.\n\nTried to cherry pick and retain commit attribution @vladfr, but that might not have worked out well. 
Feel free happy to rebase onto a cleaner git history if you prefer, my intention wasnt to take ownership - credits go to your fix.\n\nExtends and tests #15227, Fixes #15126\n\nChecklist:\n\n* [X] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.\n* [X] The title of the PR states what changed and the related issues number (used for the release note).\n* [X] The title of the PR conforms to the [Toolchain Guide](https://argo-cd.readthedocs.io/en/latest/developer-guide/toolchain-guide/#title-of-the-pr)\n* [X] I've included \"Closes [ISSUE #]\" or \"Fixes [ISSUE #]\" in the description to automatically close t", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This PR changes the way patches are computed for annotations and status. Because Helm source can have unstructured fields in `helm.valuesObject`, the strategic patching gives unpredictable behaviour. It was never meant to be used without a `struct` ([see Helm Issue](https://github.com/helm/helm/pull/3459)). So we use `jsonpatch` instead. 
This should yield the same results, and the library is already present in Argo as a dependency.\n\nChecklist:\n\n* [x] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.\n* [x] The title of the PR states what changed and the related issues number (used for the release note).\n* [x] The title of the PR conforms to the [Toolchain Guide](https://argo-cd.readthedocs.io/en/latest/developer-guide/toolchain-guide/#title-of-the-pr)\n* [x] I've included \"Closes [ISSUE #]\" or \"Fixes [ISSUE #]\" in the description to automatically close the associated issue.\n* [ ] I've updated both the CLI and UI to expose my feature, or I plan to submit a second PR with them.\n* [x] Does this PR require documentation updates? No\n* [ ] I've updated documentation as required by this PR. N/A\n* [x] Optional. My organization is added to USERS.md.\n* [x] I have signed off all my commits as required by [DCO](https://github.com/argoproj/argoproj/blob/master/community/CONTRIBUTING.md#legal)\n* [ ] I have written unit and/or", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/argoproj/argo-cd/pull/16602", + "sourceRepo": "argoproj/argo-cd", + "reactions": 41, + "comments": 22 + }, + "security": { + "scannedAt": "2026-02-27T17:43:29.487Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-23727-feat-oidc-background-token-refresh.json b/solutions/cncf-generated/argo/argo-23727-feat-oidc-background-token-refresh.json new file mode 100644 index 
00000000..80a04d27 --- /dev/null +++ b/solutions/cncf-generated/argo/argo-23727-feat-oidc-background-token-refresh.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:30.682Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "argo: feat: oidc background token refresh", + "description": "Closes #12189 \nAdds support for background OIDC token refresh\n\n- Adds `refreshTokenThreshold` field to `oidc.config` spec. When authentication middleware verifies the current token, the remaining lifetime of the token is compared to the refresh token threshold. The token is refreshed by the server when the lifetime is lower than the threshold and a new token is returned to the client.\n~~- Adds OpenTelemetry tracing for authentication flow~~\n\nTested against keycloak.\n\nChecklist:\n\n* [x] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.\n* [x] The title of the PR states what changed and the related issues number (used for the release note).\n* [x] The title of the PR conforms to the [Toolchain Guide](https://argo-cd.readthedocs.io/en/latest/developer-guide/toolchain-guide/#title-of-the-pr)\n* [x] I've included \"Closes [ISSU", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@devopsjedi can you resolve conflicts and ping me on here/slack when it is done. 
🙇", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition", + "ready-for-review" + ], + "category": "workloads", + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/argoproj/argo-cd/pull/23727", + "sourceRepo": "argoproj/argo-cd", + "reactions": 40, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:43:30.682Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-3554-feat-add-build-support-for-arm-images-2167.json b/solutions/cncf-generated/argo/argo-3554-feat-add-build-support-for-arm-images-2167.json new file mode 100644 index 00000000..37e214f5 --- /dev/null +++ b/solutions/cncf-generated/argo/argo-3554-feat-add-build-support-for-arm-images-2167.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:35.697Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "argo: feat: Add build support for ARM images (#2167)", + "description": "Since ArgoCD is written in Go, the code can be compiled on other architectures as well.\n\nUnfortunately, I haven't found an option for CircleCI to run the build on the ARM architecture, so there are no automated builds for the ARM images.\n\nFor the convenience, I have created a Drone pipeline which supports both arm and arm64 to show the build status for the ARM images for this PR: https://cloud.drone.io/alinbalutoiu/argo-cd/13\n\nImages are being pushed automatically to https://hub.docker.com/r/alinbalutoiu/argocd/tags\n\nTo build the ARM images, you need to run `make armimage` from an arm device (such as a raspberry pi for example).\n\nIt is currently running on my home Raspberry Pi 4 without any issues.\n\nCloses #2167\nCloses #3120 
\n\nChecklist:\n\n* [ ] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.\n* [X] The title of the PR ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "> In my opinion it would be better to move out the CLIs from the container image (the ARM CLI would add an extra ~50MB to the size of the image), and replace that with direct links to the GitHub release page. It should also decrease the container image size making the image pull faster. What do you think about that?\n\nYes, that's probably the right to head to. Our image got quite bloated lately, as was also mentioned in #3674. Probably moving out the CLI binaries to their own Docker images would be the way to go, and I like the idea of linking from the downloads page in UI to a GitHub release too. \n\n> Also please note that this PR requires a version bump for kustomize from `3.5.5` to `3.6.1` (I guess it should be similar to this one #3619). I don't really understand how to update the `argoproj/argocd-test-tools` image, but I assume it contains the updated kustomize binary. Should the kustomize version update be part of this PR as well?\n\nThe `argocd-test-tools` image is deprecated for the CI toolchain after the PR I already mentioned is ready & merged. We'll probably stop maintaining it as Docker image too, but let developers build it locally so they have a clean build environment that's up-to-date with what the CI does, so there'll be less nasty surprises after submitting a PR. :)\n\n> Regarding the build process, would you be open to integrate [Drone CI](https://cloud.drone.io/) (free for Open Source projects) as it comes with ARM support? 
I can help with that if you're interes", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/argoproj/argo-cd/pull/3554", + "sourceRepo": "argoproj/argo-cd", + "reactions": 32, + "comments": 17 + }, + "security": { + "scannedAt": "2026-02-27T17:43:35.697Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-6280-feat-extra-helm-values-from-external-git-repo-5826.json b/solutions/cncf-generated/argo/argo-6280-feat-extra-helm-values-from-external-git-repo-5826.json new file mode 100644 index 00000000..b8710e2a --- /dev/null +++ b/solutions/cncf-generated/argo/argo-6280-feat-extra-helm-values-from-external-git-repo-5826.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:22.432Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "argo: feat: Extra Helm values from external git repo #5826", + "description": "# Feature: External Helm values from git\n\nCLOSES #5826\n\nThis PR allows for external values.yaml from other git repos in a helm installation. 
\n\n**Sample application.yaml**\n\n```yaml\napiVersion: argoproj.io/v1alpha1\nkind: Application\nmetadata:\n name: external-test\n finalizers:\n - resources-finalizer.argocd.argoproj.io\nspec:\n project: default\n source:\n repoURL: https://charts.bitnami.com/bitnami\n targetRevision: 8.5.8\n helm:\n valueFiles:\n - values.yaml\n externalValueFiles:\n - repoURL: https://github.com/KaiReichart/argo-test-values.git\n targetRevision: main\n valueFiles:\n - values.yaml\n chart: mysql\n destination:\n server: 'https://kubernetes.default.svc'\n namespace: default\n```\n\nCurrent State:\n\n - [x] Created API changes\n - [x] correct rendering in repo-server\n - [x] UI changes\n - [x] documentation\n\n**CAVEATS:**\nDue to the way the caching system works, changes in external repos will only be checked when performin", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "Hi @alexmt could help us with the fix here?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "apiVersion: argoproj.io/v1alpha1\r\nkind: Application\r\nmetadata:\r\n name: external-test\r\n finalizers:\r\n - resources-finalizer.argocd.argoproj.io\r\nspec:\r\n project: default\r\n source:\r\n repoURL: https://charts.bitnami.com/bitnami\r\n targetRevision: 8.5.8\r\n helm:\r\n valueFiles:\r\n - values.yaml\r\n externalValueFiles:\r\n - repoURL: https://github.com/KaiReichart/argo-test-values.git\r\n targetRevision: main\r\n valueFiles:\r\n - values.yaml\r\n chart: mysql\r\n destination:\r\n server: 'https://kubernetes.default.svc'\r\n namespace: default", + "That would be epic and solve the problem of keeping them in sync (if you use the same repo for the application and values).\r\n\r\nI understand of course that this is not what you're contributing in the PR but do you think it would be a feasible solution to the sync issue?\n@KaiReichart Did you want me to send you another PR to fix the codegen 
conflict since I caused it? We really need to coordinate this w/ approval and merge, or else it's going to keep cropping up as other PRs get merged. \n@tinkerborg sure please do, this is only going to be a case of running `make codegen` to generate the necessary files. \n> @tinkerborg sure please do, this is only going to be a case of running `make codegen` to generate the necessary files.\r\n\r\nHmm, actually your repo needs to be updated w/ upstream master. I tried fetching your branch, merging argoproj/master and running the codegen. The conflict resolved fine but the commit history on the PR was a mess. Might be easier for you to do it. This is going to need to be repeated every time an upstream change involves codegen, until this branch is merged though...\nI'll do it, but this is the 3rd time I'm doing this and none of the ArgoCD maintainers has even acknowledged this PR, so I'm a bit hesitant about always keeping it up to date with the generated code until the maintainers at least signal any kind of interest in this feature...\n> > @rouke-broersma this is exactly what the field `targetRevision` is for. You could have tags (or branches) in your repos that contain the version specific value files, and then reference these in your `application.yaml`.\r\n> > So you can either reference `main` in `application.yaml` and manually keep your 2 repos in sync or reference a specific version branch and update `application.yaml` when you want to update the application.\r\n> > However this isn't a problem specific to this implementation, but rather to the whole approach of splitting chart and values into different repos.\r\n> \r\n> We are planning to put the application.yaml and the values.yaml in the same repo. Argo could theoretically (perhaps argo already does this?) tag the application with the git revision it comes from (app-of-apps pattern). 
If I could then specify in the application something akin to\r\n> \r\n>" + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [ + "Namespace" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/argoproj/argo-cd/pull/6280", + "sourceRepo": "argoproj/argo-cd", + "reactions": 144, + "comments": 61 + }, + "security": { + "scannedAt": "2026-02-27T17:43:22.432Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-8012-feat-add-skipcrds-flag-for-helm-charts.json b/solutions/cncf-generated/argo/argo-8012-feat-add-skipcrds-flag-for-helm-charts.json new file mode 100644 index 00000000..1b8a1714 --- /dev/null +++ b/solutions/cncf-generated/argo/argo-8012-feat-add-skipcrds-flag-for-helm-charts.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:39.640Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "argo: feat: add skipCrds flag for helm charts", + "description": "Closes #6252\n\nChecklist:\n\n* [ ] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.\n* [x] The title of the PR states what changed and the related issues number (used for the release note).\n* [x] I've included \"Closes [ISSUE #]\" or \"Fixes [ISSUE #]\" in the description to automatically close the associated issue.\n* [x] I've updated both the CLI and UI to expose my feature, or I plan to submit a second PR with them.\n* [x] Does this PR require documentation updates?\n* [x] I've updated documentation as required by this PR.\n* [ ] Optional. 
My organization is added to USERS.md.\n* [x] I have signed off all my commits as required by [DCO](https://github.com/argoproj/argoproj/tree/master/community#contributing-to-argo)\n* [x] I have written unit and/or e2e tests for my change. PRs without these are unlikely to be merged.\n* [x] My bu", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "The folks over at `prometheus-community` decided to take the ball home and close the playground; any help you can provide here would be greatly appreciated.\n\nhttps://github.com/prometheus-community/helm-charts/pull/1510#issuecomment-1007554251", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "| [Impacted Files](https://codecov.io/gh/argoproj/argo-cd/pull/8012?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj) | Coverage Δ | |\n|---|---|---|\n| [pkg/apis/application/v1alpha1/types.go](https://codecov.io/gh/argoproj/argo-cd/pull/8012/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-cGtnL2FwaXMvYXBwbGljYXRpb24vdjFhbHBoYTEvdHlwZXMuZ28=) | `55.32% <ø> (+0.13%)` | :arrow_up: |\n| [util/helm/helmver.go](https://codecov.io/gh/argoproj/argo-cd/pull/8012/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-dXRpbC9oZWxtL2hlbG12ZXIuZ28=) | `80.00% <ø> (ø)` | |\n| [cmd/util/app.go](https://codecov.io/gh/argoproj/argo-cd/pull/8012/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-Y21kL3V0aWwvYXBwLmdv) | `47.10% <60.00%> (+0.16%)` | :arrow_up: |\n| 
[reposerver/repository/repository.go](https://codecov.io/gh/argoproj/argo-cd/pull/8012/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-cmVwb3NlcnZlci9yZXBvc2l0b3J5L3JlcG9zaXRvcnkuZ28=) | `57.85% <100.00%> (-0.79%)` | :arrow_down: |\n| [util/helm/cmd.go](https://codecov.io/gh/argoproj/argo-cd/pull/8012/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-dXRpbC9oZWxtL2NtZC5nbw==) | `28.65% <100.00%> (ø)` | |\n| [controller/sync.go](https://codecov.io/gh/argoproj/argo-cd/pull/8012/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-Y29udHJvbGxlci9zeW5jLmdv) | `57.51% <0.00%> (-7.63%)` | :arrow_down: |\n| [server/application/application.go](https://codecov.io/gh/argoproj/argo-cd/pull/8012/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-c2VydmVyL2FwcGxpY2F0aW9uL2FwcGxpY2F0aW9uLmdv) | `31.20% <0.00%> (-1.57%)` | :arrow_down: |\n| [util/session/sessionmanager.go](https://codecov.io/gh/argoproj/argo-cd/pull/8012/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-dXRpbC9zZXNzaW9uL3Nlc3Npb25tYW5hZ2VyLmdv) | `68.75% <0.00%> (-1.37%)` | :arrow_down: |\n| [server/cluster/cluster.go](https://codecov.io/gh/argoproj/argo-cd/pull/8012/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-c2VydmVyL2NsdXN0ZXIvY2x1c3Rlci5nbw==) | `26.69% <0.00%> (-0.67%)` | :arrow_down: |\n| ... 
and [19 more](https://codecov.io/gh/argoproj/argo-cd/pull/8012/diff?src=pr&el=tree-more&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj) | |\n\n------\n\n[Continue to review full report at Codecov](https://codecov.io/gh/argoproj/argo-cd/pull/8012?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj).\n> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj)\n> `Δ = absolute (impact)`, `ø = not affected`, `? = missing data`\n> Powered by [Codecov](https://codecov.io/gh/argoproj/argo-cd/pull/8012?src=pr&el=footer&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj). Last update [99d1dca...fece925](https://codecov.io/gh/argoproj/argo-cd/pull/8012?src=pr&el=lastupdated&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj).\n\n\r\n> E2E tests have caught an issue: we still support helm2 . The `--skip-crds` flag should be added only for Helm3. @patst , can you please fix it.\r\n\r\n@alexmt : thanks for your feedback.\r\n\r\nI missed the one condition which makes sure `-include_crds` is not passed for helm2 if `skip-crds` is unset. I think this is fixed now. 
Would be great to have the E2E tests run again \nthe Lint docs failed due to an unrelated issue because `https://grafana.apps.argoproj.io/` wasn't reachable at the moment:" + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/argoproj/argo-cd/pull/8012", + "sourceRepo": "argoproj/argo-cd", + "reactions": 24, + "comments": 8 + }, + "security": { + "scannedAt": "2026-02-27T17:43:39.640Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-8123-feat-add-support-for-server-side-apply.json b/solutions/cncf-generated/argo/argo-8123-feat-add-support-for-server-side-apply.json new file mode 100644 index 00000000..583d5696 --- /dev/null +++ b/solutions/cncf-generated/argo/argo-8123-feat-add-support-for-server-side-apply.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:46.396Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "argo: feat: add support for server-side apply", + "description": "Fixes: https://github.com/argoproj/argo-cd/issues/2267\nRequires: https://github.com/argoproj/gitops-engine/pull/363\n\nChecklist:\n\n* [ ] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.\n* [ ] The title of the PR states what changed and the related issues number (used for the release note).\n* [x] I've included \"Closes [ISSUE #]\" or \"Fixes [ISSUE #]\" in the description to automatically close the associated issue.\n* [ ] I've updated both the CLI and UI to expose my feature, or I plan to submit a second PR with them.\n* [ ] Does this PR require 
documentation updates?\n* [ ] I've updated documentation as required by this PR.\n* [ ] Optional. My organization is added to USERS.md.\n* [ ] I have signed off all my commits as required by [DCO](https://github.com/argoproj/argoproj/tree/master/community#contributing-to-argo)\n* [ ] I hav", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Thanks a lot for this effort, @sathieu. \n\nWe have discussed this in yesterday's contributors meeting and came to the conclusion that we may need some more design thoughts around integrating SSA into Argo CD. Many people are waiting for Server Side Apply feature for various use-cases, and therefore we should look at all the known requirements before we pull it into Argo CD. Worst case at a quick shot would be that we may end up in a situation having to introduce breaking changes for going forward.\n\nWould you be willing to collaborate with us in this design, and then adapt the PRs to reflect the design decisions?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "PatchOptions.meta.k8s.io \"\" is invalid: fieldManager: Required value: is required for apply patch" + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/argoproj/argo-cd/pull/8123", + "sourceRepo": "argoproj/argo-cd", + "reactions": 20, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:43:46.396Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-9703-feat-add-dark-theme-to-ui.json b/solutions/cncf-generated/argo/argo-9703-feat-add-dark-theme-to-ui.json new file mode 100644 index 00000000..924e73ad --- /dev/null +++ 
b/solutions/cncf-generated/argo/argo-9703-feat-add-dark-theme-to-ui.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:38.380Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "argo: feat: add dark theme to UI", + "description": "closes #4722\n\nDependent on argo-ui PR - https://github.com/argoproj/argo-ui/pull/245\n\nDemo video \n\nhttps://user-images.githubusercontent.com/17771352/174284549-5c828678-05df-4242-99b9-425a36b3783c.mp4\n\n- There are a few components like dropdowns, popups and autocomplete that are still not themed. They render differently and can use a separate PR and theme variable would be needed to passed as a prop.\n\nshould close #4722\n\nInitial Description that is no longer valid\nSteps to build and run, checkout to `argo-ui` **PR branch** in your local. \nRun `yalc publish`\nThen in this argocd branch `cd ui`\nRun `yalc add argo-ui`\n`cd ..`\n`make start`\n- Lint is failing because I'm importing a package from argo-ui that is still not merged.\n\nNote on DCO:\n\nIf the DCO action in the integration test fails, one or more of your commits are not signed off. Please click on the *Details* link next to the DCO action for instructions on how to resolve this.\n\nChecklist:\n\n* [ ] Either (a) I've created an [enhancemen", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "View steps to test in the upstream ticket \nUpstream ticket - https://github.com/argoproj/argo-cd/pull/9703\n\nThe main logic for creating the theme map is in this file - https://github.com/argoproj/argo-ui/pull/245/files#diff-9be47d1a6668e34bf14bd39d2c830ecd572725cac9912f2bcd5e9b6d512ec0f1\n\nIn this approach, it will be also easier to add more themes (light, dark, some other theme). Only theme.scss would need to be updated to add a new theme. 
\n\nAll the other places is just adding the `@include themify` directive.\nOnly the background colours are changed. \nSome elements like drop downs and popups are not updated because of more complexity, can be updated after this is merged.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/argoproj/argo-cd/pull/9703", + "sourceRepo": "argoproj/argo-cd", + "reactions": 29, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:43:38.380Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-9711-feat-introduces-server-side-apply-as-sync-option.json b/solutions/cncf-generated/argo/argo-9711-feat-introduces-server-side-apply-as-sync-option.json new file mode 100644 index 00000000..de687c0c --- /dev/null +++ b/solutions/cncf-generated/argo/argo-9711-feat-introduces-server-side-apply-as-sync-option.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:45.285Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "argo: feat: Introduces Server-Side Apply as sync option", + "description": "This PR implements the Server-Side Apply [proposal](https://github.com/argoproj/argo-cd/blob/master/docs/proposals/server-side-apply.md).\n\nAchieved goals:\n* [x] [[G-1] Fine grained configuration](https://github.com/argoproj/argo-cd/blob/master/docs/proposals/server-side-apply.md#g-1-fine-grained-configuration)\n* [x] [[G-2] Strategic merge patch while diffing](https://github.com/argoproj/argo-cd/blob/master/docs/proposals/server-side-apply.md#g-2-strategic-merge-patch-while-diffing)\n* [ ] [[G-3] Admission 
Controllers compatibility](https://github.com/argoproj/argo-cd/blob/master/docs/proposals/server-side-apply.md#g-3-admission-controllers-compatibility)\n* [x] [[G-4] Conflict management](https://github.com/argoproj/argo-cd/blob/master/docs/proposals/server-side-apply.md#g-4-conflict-management)\n* [x] [[G-5] Register a proper manager](https://github.com/argoproj/argo-cd/blob/master/docs/proposals/server-side-apply.md#g-5-register-a-proper-manager)\n\nFix https://github.com/argoproj/argo-cd", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Signed-off-by: Leonardo Luz Almeida \n\nArgoCD Server-Side Apply proposal\n\nChecklist:\n\n* [x] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.\n* [x] The title of the PR states what changed and the related issues number (used for the release note).\n* [x] I've included \"Closes [ISSUE #]\" or \"Fixes [ISSUE #]\" in the description to automatically close the associated issue.\n* [ ] I've updated both the CLI and UI to expose my feature, or I plan to submit a second PR with them.\n* [ ] Does this PR require documentation updates?\n* [ ] I've updated documentation as required by this PR.\n* [ ] Optional. My organization is added to USERS.md.\n* [x] I have signed off all my commits as required by [DCO](https://github.com/argoproj/argoproj/tree/master/community#contributing-to-argo)\n* [ ] I have written unit and/or e2e tests for my change. 
PRs without these are unlikely to be merged.\n* [ ] My build is green ([troubleshooting builds](https://argo-cd.readthedocs.io/en/latest/developer-guide/ci/)).", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "| [Impacted Files](https://codecov.io/gh/argoproj/argo-cd/pull/9711?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj) | Coverage Δ | |\n|---|---|---|\n| [cmd/argocd/commands/app.go](https://codecov.io/gh/argoproj/argo-cd/pull/9711/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-Y21kL2FyZ29jZC9jb21tYW5kcy9hcHAuZ28=) | `20.32% <0.00%> (-0.06%)` | :arrow_down: |\n| [controller/sync.go](https://codecov.io/gh/argoproj/argo-cd/pull/9711/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-Y29udHJvbGxlci9zeW5jLmdv) | `56.09% <ø> (ø)` | |\n| [util/argo/managedfields/managed\\_fields.go](https://codecov.io/gh/argoproj/argo-cd/pull/9711/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-dXRpbC9hcmdvL21hbmFnZWRmaWVsZHMvbWFuYWdlZF9maWVsZHMuZ28=) | `67.27% <ø> (+25.22%)` | :arrow_up: |\n| [controller/state.go](https://codecov.io/gh/argoproj/argo-cd/pull/9711/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-Y29udHJvbGxlci9zdGF0ZS5nbw==) | `73.89% <33.33%> (-0.33%)` | :arrow_down: |\n| [util/argo/diff/diff.go](https://codecov.io/gh/argoproj/argo-cd/pull/9711/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-dXRpbC9hcmdvL2RpZmYvZGlmZi5nbw==) | `52.41% <60.00%> (+0.20%)` | :arrow_up: |\n| 
[util/db/gpgkeys.go](https://codecov.io/gh/argoproj/argo-cd/pull/9711/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-dXRpbC9kYi9ncGdrZXlzLmdv) | `77.14% <0.00%> (-1.97%)` | :arrow_down: |\n| [util/gpg/gpg.go](https://codecov.io/gh/argoproj/argo-cd/pull/9711/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-dXRpbC9ncGcvZ3BnLmdv) | `66.96% <0.00%> (-1.25%)` | :arrow_down: |\n| [util/oidc/oidc.go](https://codecov.io/gh/argoproj/argo-cd/pull/9711/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-dXRpbC9vaWRjL29pZGMuZ28=) | `57.70% <0.00%> (+1.58%)` | :arrow_up: |\n| [pkg/apis/application/v1alpha1/app\\_project\\_types.go](https://codecov.io/gh/argoproj/argo-cd/pull/9711/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-cGtnL2FwaXMvYXBwbGljYXRpb24vdjFhbHBoYTEvYXBwX3Byb2plY3RfdHlwZXMuZ28=) | `59.61% <0.00%> (+5.21%)` | :arrow_up: |\n\nHelp us with your feedback. Take ten seconds to tell us [how you rate us](https://about.codecov.io/nps?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj). Have a feature suggestion? [Share it here.](https://app.codecov.io/gh/feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj)\n\nI enabled server side apply at the app level right now like below:", + "I also tried our service port case mentioned in https://github.com/argoproj/argo-cd/pull/8812#discussion_r849140565 but unfortunately, it doesn't work and threw error during diff phase. The error is:\r\n\r\n![image](https://user-images.githubusercontent.com/25150124/177224413-66dea4d1-a8dd-4a1f-b109-edeef27e5803.png)\r\n\r\nHopefully, the error would be helpful for you to troubleshoot. 
Thanks for the great work so far!\n> I also tried our service port case mentioned in [#8812 (comment)](https://github.com/argoproj/argo-cd/pull/8812#discussion_r849140565) but unfortunately, it doesn't work and threw error during diff phase. The error is:\r\n> \r\n> ![image](https://user-images.githubusercontent.com/25150124/177224413-66dea4d1-a8dd-4a1f-b109-edeef27e5803.png)\r\n> \r\n> Hopefully, the error would be helpful for you to troubleshoot. Thanks for the great work so far!\r\n\r\nHi @yeya24, I created a test with your service example as part of https://github.com/argoproj/gitops-engine/pull/418 and the test is passing. From the error message, it seems that there is something duplicated in your ports list. Can you please double check if you have duplicated ports with the same name?\n> #8812 (comment)\r\n\r\nI was using the same manifest but I still got this error. No duplicate port name.", + "> I was using the same manifest but I still got this error. No duplicate port name.\r\n\r\nCan you pls try applying this manifest and provide me the results?" 
+ ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/argoproj/argo-cd/pull/9711", + "sourceRepo": "argoproj/argo-cd", + "reactions": 21, + "comments": 6 + }, + "security": { + "scannedAt": "2026-02-27T17:43:45.285Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-9755-feat-applications-in-any-namespace.json b/solutions/cncf-generated/argo/argo-9755-feat-applications-in-any-namespace.json new file mode 100644 index 00000000..27b9bf1c --- /dev/null +++ b/solutions/cncf-generated/argo/argo-9755-feat-applications-in-any-namespace.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:32.422Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "argo: feat: Applications in any namespace", + "description": "This change enables Application resources to exist in any namespace allowed by configuration.\n\nThe feature is *not* enabled by default, and has to be explicitly enabled by the administrator (see below).\n\nThe change aims to be fully backwards compatible.\n\nThis is a rather large change. It comprises changes to the controller's reconciliation logic, the API server as well as changes to the CLI and the UI. I try to outline the changes and design decisions best I can in the below description.\n\nThis PR implements proposal #6409 and supersedes the PoC PR #6537", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This PR is a PoC to prove that the proposal created at #6409 actually can be implemented with changes that are not too intrusive. \n\nDespite the PR looks big (>=48 files changes), most of the changes are about how things (e.g. application names) are referenced (e.g. 
application name). There's not much changes to complexity imho.\n\nThis shall not be merged. It serves merely as a demonstration.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "https://argocd.example.com/applications/guestbook", + "https://argocd.example.com/applications/argocd/guestbook", + "GET /api/v1/applications/guestbook?appNamespace=foo" + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [ + "Namespace" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/argoproj/argo-cd/pull/9755", + "sourceRepo": "argoproj/argo-cd", + "reactions": 37, + "comments": 48 + }, + "security": { + "scannedAt": "2026-02-27T17:43:32.422Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-9895-fix-unexpected-reserved-bits-breaking-web-terminal-9605.json b/solutions/cncf-generated/argo/argo-9895-fix-unexpected-reserved-bits-breaking-web-terminal-9605.json new file mode 100644 index 00000000..e465adef --- /dev/null +++ b/solutions/cncf-generated/argo/argo-9895-fix-unexpected-reserved-bits-breaking-web-terminal-9605.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:43.431Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "argo: fix: 'unexpected reserved bits' breaking web terminal (#9605)", + "description": "Fixes #9605 \nFixes #9641\nFixes #9643\n\nHi I'm Michael, and I suck at concurrent programming.\n\n**But** [another issue](https://github.com/gorilla/websocket/issues/373) directed me to run web terminal with the race detector enabled.\n\nAnd I got a bunch of text that I didn't understand.\n\n
\nVery boring race detector output\n\n```\n==================\nWARNING: DATA RACE\nWrite at 0x00c0016f0008 by goroutine 44:\n runtime.racewriterange()\n :1 +0x29\n internal/poll.ignoringEINTRIO()\n /usr/local/go/src/internal/poll/fd_unix.go:794 +0x44b\n internal/poll.(*FD).Read()\n /usr/local/go/src/internal/poll/fd_unix.go:163 +0x26\n net.(*netFD).Read()\n /usr/local/go/src/net/fd_posix.go:55 +0x50\n net.(*conn).Read()\n /usr/local/go/src/net/net.go:183 +0xb0\n net.(*TCPConn).Read()\n :1 +0x64\n github.com/soheilhy/cmux.(*bufferedReader).Read()\n /Users/mcrenshaw/go/src/github.com/argoproj/argo-cd/vendor/github.com/soheilhy/cmu", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@yeya24 do you have time to take a glance at this? Any thoughts on also putting `WriteMessage` in a mutex?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition", + "cherry-pick-2-4" + ], + "category": "workloads", + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/argoproj/argo-cd/pull/9895", + "sourceRepo": "argoproj/argo-cd", + "reactions": 21, + "comments": 7 + }, + "security": { + "scannedAt": "2026-02-27T17:43:43.432Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/armada/armada-2111-quickly-populate-lookout-db.json b/solutions/cncf-generated/armada/armada-2111-quickly-populate-lookout-db.json new file mode 100644 index 00000000..c3481a7f --- /dev/null +++ b/solutions/cncf-generated/armada/armada-2111-quickly-populate-lookout-db.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:29.323Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "armada: Quickly populate lookout db", 
+ "description": "#### What type of PR is this?\nFeature\n#### What this PR does / why we need it:\nThis Go tool directly inserts jobs into the Lookout database and simulates each job's state change ( from queued -> pending -> running, after that some of them randomly being failed, cancelled or succeeded ) without the need of submitting real jobs into armada which requires the whole system to be up and running. \nThis will be helpful in UI/API testing by simulating any desired number of submitted jobs.\n#### Which issue(s) this PR fixes:\n\nFixes #1742", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "> I think we would like to see some integration tests with the CLI.\n> \n> Assuming you have a postgres db can you write some test cases that verify this functionality?\n\n@kannon92 I am confused a little bit, can you elaborate which tests you are talking about here?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "docker run -d --name=postgres $(DOCKER_NET) -p 5432:5432 -e POSTGRES_PASSWORD=psw postgres:14.2\r\n sleep 3\r\n function tearDown { docker rm -f redis postgres; }; trap tearDown EXIT" + ] + } + }, + "metadata": { + "tags": [ + "armada", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "armada" + ], + "targetResourceKinds": [ + "Job" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/armadaproject/armada/pull/2111", + "sourceRepo": "armadaproject/armada", + "reactions": 0, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:47:29.323Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/atlantis/atlantis-1352-fix-output-remove-refreshing-state-from-output.json b/solutions/cncf-generated/atlantis/atlantis-1352-fix-output-remove-refreshing-state-from-output.json new file mode 100644 index 
00000000..69027e89 --- /dev/null +++ b/solutions/cncf-generated/atlantis/atlantis-1352-fix-output-remove-refreshing-state-from-output.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:42.250Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "atlantis: fix(output): Remove Refreshing state... from output", + "description": "Fixes #1306\n\nSince Terraform 0.14.0 there are no separator between refreshing plan and the plan. This strategy is to find the last line with \"Refreshing state...\" and remove it with all of the above.\n\nI remove the sentence \"Refreshing Terraform state in-memory prior to plan...\" from tests because Terraform don't show that anymore.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This reverts commit 7301febcb4409e05fe78d8b18dfb220413a8191f.\n\nFixtures weren't updated here:\nhttps://github.com/runatlantis/atlantis/pull/1352", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "atlantis", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "atlantis" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/runatlantis/atlantis/pull/1352", + "sourceRepo": "runatlantis/atlantis", + "reactions": 16, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:47:42.250Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/atlantis/atlantis-1392-feature-add-head-commit-env-var.json b/solutions/cncf-generated/atlantis/atlantis-1392-feature-add-head-commit-env-var.json new file mode 100644 index 00000000..21e86b56 --- /dev/null +++ b/solutions/cncf-generated/atlantis/atlantis-1392-feature-add-head-commit-env-var.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + 
"exportedAt": "2026-02-27T17:47:34.047Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "atlantis: Feature: Add HEAD_COMMIT env var", + "description": "Resolves #1390", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Not sure why tests are failing in circleci. I cant reproduce it locally:\n\n```\n$ make test-coverage\n? github.com/runatlantis/atlantis [no test files]\nok github.com/runatlantis/atlantis/cmd 0.064s coverage: 86.4% of statements\n\nok github.com/runatlantis/atlantis/server 106.773s coverage: 58.0% of statements\nok github.com/runatlantis/atlantis/server/events 0.989s coverage: 83.6% of statements\nok github.com/runatlantis/atlantis/server/events/db 0.827s coverage: 84.4% of statements\nok github.com/runatlantis/atlantis/server/events/locking 0.023s coverage: 100.0% of statements\n? github.com/runatlantis/atlantis/server/events/matchers [no test files]\nok github.com/runatlantis/atlantis/server/events/models 0.039s coverage: 79.6% of statements\n? github.com/runatlantis/atlantis/server/events/models/fixtures [no test files]\nok github.com/runatlantis/atlantis/server/events/runtime 0.107s coverage: 91.7% of statements\nok github.com/runatlantis/atlantis/server/events/terraform 1.158s coverage: 85.0% of statements\nok github.com/runatlantis/atlantis/server/events/vcs 7.391s coverage: 65.6% of statements\nok github.com/runatlantis/atlantis/server/events/vcs/bitbucketcloud 0.049s coverage: 61.3% of statements\nok github.com/runatlantis/atlantis/server/events/vcs/bitbucketserver 0.049s coverage: 48.9% of statements\nok github.com/runatlantis/atlantis/server/events", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "$ make test-coverage\r\n? 
github.com/runatlantis/atlantis [no test files]\r\nok github.com/runatlantis/atlantis/cmd 0.064s coverage: 86.4% of statements\r\n\r\nok github.com/runatlantis/atlantis/server 106.773s coverage: 58.0% of statements\r\nok github.com/runatlantis/atlantis/server/events 0.989s coverage: 83.6% of statements\r\nok github.com/runatlantis/atlantis/server/events/db 0.827s coverage: 84.4% of statements\r\nok github.com/runatlantis/atlantis/server/events/locking 0.023s coverage: 100.0% of statements\r\n? github.com/runatlantis/atlantis/server/events/matchers [no test files]\r\nok github.com/runatlantis/atlantis/server/events/models 0.039s coverage: 79.6% of statements\r\n? github.com/runatlantis/atlantis/server/events/models/fixtures [no test files]\r\nok github.com/runatlantis/atlantis/server/events/runtime 0.107s coverage: 91.7% of statements\r\nok github.com/runatlantis/atlantis/server/events/terraform 1.158s coverage: 85.0% of statements\r\nok github.com/runatlantis/atlantis/server/events/vcs 7.391s coverage: 65.6% of statements\r\nok github.com/runatlantis/atlantis/server/events/vcs/bitbucketcloud 0.049s coverage: 61.3% of statements\r\nok github.com/runatlantis/atlantis/server/events/vcs/bitbucketserver 0.049s coverage: 48.9% of statements\r\nok github.com/runatlantis/atlantis/server/events/vcs/common 0.038s coverage: 100.0% of statements\r\n? 
github.com/runatlantis/atlantis/server/events/vcs/fixtures [no test files]\r\nok github.com/runatlantis/atlantis/server/events/webhooks 0.020s coverage: 88.4% of statements\r\nok github.com/runatlantis/atlantis/server/events/yaml 0.027s coverage: 97.7% of statements\r\nok github.com/runatlantis/atlantis/server/events/yaml/raw 0.028s coverage: 71.0% of statements\r\nok github.com/runatlantis/atlantis/server/events/yaml/valid 0.003s coverage: 84.4% of statements\r\nok github.com/runatlantis/atlantis/server/logging 0.002s coverage: 0.0% of statements [no tests to run]\r\nok github.com/runatlantis/atlantis/server/recovery 0.002s coverage: 0.0% of statements [no tests to run]\r\n? github.com/runatlantis/atlantis/testdrive [no test files]", + "$ make test\r\n? github.com/runatlantis/atlantis [no test files]\r\nok github.com/runatlantis/atlantis/cmd (cached)\r\nok github.com/runatlantis/atlantis/server (cached)\r\nok github.com/runatlantis/atlantis/server/events (cached)\r\nok github.com/runatlantis/atlantis/server/events/db (cached)\r\nok github.com/runatlantis/atlantis/server/events/locking (cached)\r\n? github.com/runatlantis/atlantis/server/events/matchers [no test files]\r\nok github.com/runatlantis/atlantis/server/events/models (cached)\r\n? github.com/runatlantis/atlantis/server/events/models/fixtures [no test files]\r\nok github.com/runatlantis/atlantis/server/events/runtime (cached)\r\nok github.com/runatlantis/atlantis/server/events/terraform (cached)\r\nok github.com/runatlantis/atlantis/server/events/vcs (cached)\r\nok github.com/runatlantis/atlantis/server/events/vcs/bitbucketcloud (cached)\r\nok github.com/runatlantis/atlantis/server/events/vcs/bitbucketserver (cached)\r\nok github.com/runatlantis/atlantis/server/events/vcs/common (cached)\r\n? 
github.com/runatlantis/atlantis/server/events/vcs/fixtures [no test files]\r\nok github.com/runatlantis/atlantis/server/events/webhooks (cached)\r\nok github.com/runatlantis/atlantis/server/events/yaml (cached)\r\nok github.com/runatlantis/atlantis/server/events/yaml/raw (cached)\r\nok github.com/runatlantis/atlantis/server/events/yaml/valid (cached)\r\nok github.com/runatlantis/atlantis/server/logging (cached) [no tests to run]\r\nok github.com/runatlantis/atlantis/server/recovery (cached) [no tests to run]\r\n? github.com/runatlantis/atlantis/testdrive [no test files]" + ] + } + }, + "metadata": { + "tags": [ + "atlantis", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "atlantis" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/runatlantis/atlantis/pull/1392", + "sourceRepo": "runatlantis/atlantis", + "reactions": 21, + "comments": 5 + }, + "security": { + "scannedAt": "2026-02-27T17:47:34.047Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/atlantis/atlantis-2002-fix-retry-files-requests-to-github.json b/solutions/cncf-generated/atlantis/atlantis-2002-fix-retry-files-requests-to-github.json new file mode 100644 index 00000000..1423b7c8 --- /dev/null +++ b/solutions/cncf-generated/atlantis/atlantis-2002-fix-retry-files-requests-to-github.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:48.001Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "atlantis: fix: retry /files/ requests to github", + "description": "Similar to #1131, we see this for /files/ too, resulting in a plan\nerror.\n\nCloses #1905", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This is a follow on to resolve similar issues to #1019.\n\nIn #1131 retries were added to GetPullRequest. 
And in #1810 a backoff was included.\n\nHowever, those only resolve one potential request at the very beginning of a PR creation. The other request that happens early on during auto-plan is one to ListFiles to detect the modified files. This too can sometimes result in a 404 due to async updates on the GitHub side.\n\n---\n\nMy team recently upgraded a few Atlantis instances that were pretty old. They didn't yet include the fixes described above.\n\nWe upgraded to v0.18.1. After upgrading we were hopeful our dev teams would be happy to know these race condition errors were a thing of the past. But in only a couple of days, we got another report!\n\nI was able to find an error log with the following message (org/repo/pr-number redacted):\n```\nGET https://api.github.com/repos/{owner}/{repo}/pulls/{number}/files?per_page=300: 404 Not Found []\n```\n\nAnd the following stacktrace:\n```\ngithub.com/runatlantis/atlantis/server/events.(*PullUpdater).updatePull\n\tgithub.com/runatlantis/atlantis/server/events/pull_updater.go:14\ngithub.com/runatlantis/atlantis/server/events.(*PlanCommandRunner).runAutoplan\n\tgithub.com/runatlantis/atlantis/server/events/plan_command_runner.go:77\ngithub.com/runatlantis/atlantis/server/events.(*PlanCommandRunner).Run\n\tgithub.com/runatlantis/atlantis/server/events/plan_command_runner.go:221\ngithub.com/runatlantis/atlantis/server/events.(*DefaultCommandRunner).RunAutoplan", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "GET https://api.github.com/repos/{owner}/{repo}/pulls/{number}/files?per_page=300: 404 Not Found []", + 
"github.com/runatlantis/atlantis/server/events.(*PullUpdater).updatePull\r\n\tgithub.com/runatlantis/atlantis/server/events/pull_updater.go:14\r\ngithub.com/runatlantis/atlantis/server/events.(*PlanCommandRunner).runAutoplan\r\n\tgithub.com/runatlantis/atlantis/server/events/plan_command_runner.go:77\r\ngithub.com/runatlantis/atlantis/server/events.(*PlanCommandRunner).Run\r\n\tgithub.com/runatlantis/atlantis/server/events/plan_command_runner.go:221\r\ngithub.com/runatlantis/atlantis/server/events.(*DefaultCommandRunner).RunAutoplanCommand\r\n\tgithub.com/runatlantis/atlantis/server/events/command_runner.go:163" + ] + } + }, + "metadata": { + "tags": [ + "atlantis", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "atlantis" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/runatlantis/atlantis/pull/2002", + "sourceRepo": "runatlantis/atlantis", + "reactions": 12, + "comments": 3 + }, + "security": { + "scannedAt": "2026-02-27T17:47:48.001Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/atlantis/atlantis-2055-add-support-for-stages-of-plans.json b/solutions/cncf-generated/atlantis/atlantis-2055-add-support-for-stages-of-plans.json new file mode 100644 index 00000000..d3f47c1f --- /dev/null +++ b/solutions/cncf-generated/atlantis/atlantis-2055-add-support-for-stages-of-plans.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:53.135Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "atlantis: Add support for stages of plans", + "description": "Hello!\n\nThis is an initial implementation for #972 , to allow atlantis to only plan / apply projects when their dependencies have already been applied (or not modified).\n\nThis is useful for terragrunt or when having a monorepo with projects referencing others via remote 
state. If using with terragrunt, the follow tool is useful for automatically creating the dependency tree (but will need updating to support the new config) https://github.com/transcend-io/terragrunt-atlantis-config.\n\n# Planning\n\nProjects are grouped by dependencies (optionally: unmodified transient dependencies can be included) and planned by group. If all projects in a group come back with a clean plan (terraform outputs `no infrastructure changes...`), the next group is automatically planned. If any projects in a group have a plan that needs applying, subsequent groups of projects have their `ProjectPlanStatus` set to `PendingDependencyAppliedStatus`\n\n# Applying\n\nApplying is done as normal. If a project has a plan, i", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This implements:\nhttps://github.com/runatlantis/atlantis/issues/1259\n\nfeat(1259): atlantis destroy order\n\nWhile creating the directed acyclic graph, it checks if the\nterragrunt.hcl file of the stacks contains our destroy flag:\n`# ATLANTIS_PLEASE_DESTROY_STACK`\n\nIf this flag is found, the corresponding edge will be reverted, i.e.\ninstead of u -> v, we will get v -> u.\n\nfeat(1259): topological sort: fix and debugging\n\n* this will output the dependency graph in *.dot format\n* it will make the plan fail if the TopSort() fails for whatever reason\n* sorting is not necessary when there is just one project\n* dynamic folder/file generation\n* remove some unnecessary else branches\n* add a test for FindProjectNo()\n* and most important: fix FindProjectNo() so that the sorting is correct\n\nNew dependency\n\n- https://github.com/yourbasic/graph", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "- autoplan:\r\n enabled: true\r\n when_modified:\r\n - '*.hcl'\r\n - '*.tf*'\r\n - ../../account.hcl\r\n - ../env.hcl\r\n - ../../region.hcl\r\n dir: 
terraform/uat/ap-southeast-1/uat/network\r\n workflow: uat\r\n workspace: terraform_uat_ap-southeast-1_uat_network\r\n\r\n- autoplan:\r\n enabled: true\r\n when_modified:\r\n - '*.hcl'\r\n - '*.tf*'\r\n - ../../account.hcl\r\n - ../env.hcl\r\n - ../network/terragrunt.hcl\r\n - ../../region.hcl\r\n dir: terraform/uat/ap-southeast-1/uat/ecs_cluster\r\n workflow: uat\r\n workspace: terraform_uat_ap-southeast-1_uat_ecs_cluster\r\n depends_on:\r\n - terraform/uat/ap-southeast-1/uat/network\r\n\r\n- autoplan:\r\n enabled: true\r\n when_modified:\r\n - '*.hcl'\r\n - '*.tf*'\r\n - ../../account.hcl\r\n - ../env.hcl\r\n - ../ecs_cluster/terragrunt.hcl\r\n - ../network/terragrunt.hcl\r\n - ../../region.hcl\r\n - ../service_ecs_task_definition/terragrunt.hcl\r\n - ../network/terragrunt.hcl\r\n - ../service_registry/terragrunt.hcl\r\n dir: terraform/uat/ap-southeast-1/uat/service_ecs_service\r\n workflow: uat\r\n workspace: terraform_uat_ap-southeast-1_uat_service_ecs_service\r\n depends_on:\r\n - terraform/uat/ap-southeast-1/uat/network\r\n - terraform/uat/ap-southeast-1/uat/ecs_cluster\r\n - terraform/uat/ap-southeast-1/uat/service_ecs_task_definition\r\n\r\n- autoplan:\r\n enabled: true\r\n when_modified:\r\n - '*.hcl'\r\n - '*.tf*'\r\n - ../../account.hcl\r\n - ../env.hcl\r\n - ../../region.hcl\r\n dir: terraform/uat/ap-southeast-1/uat/service_ecs_task_definition\r\n workflow: uat\r\n workspace: terraform_uat_ap-southeast-1_uat_service_ecs_task_definition\r\n depends_on:\r\n - terraform/uat/ap-southeast-1/uat/network\r\n - terraform/uat/ap-southeast-1/uat/ecs_cluster" + ] + } + }, + "metadata": { + "tags": [ + "atlantis", + "sandbox", + "app-definition", + "feature", + "waiting-on-response" + ], + "category": "workloads", + "cncfProjects": [ + "atlantis" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/runatlantis/atlantis/pull/2055", + "sourceRepo": "runatlantis/atlantis", + "reactions": 9, + "comments": 13 
+ }, + "security": { + "scannedAt": "2026-02-27T17:47:53.135Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/atlantis/atlantis-2261-feat-stream-output-for-custom-workflows.json b/solutions/cncf-generated/atlantis/atlantis-2261-feat-stream-output-for-custom-workflows.json new file mode 100644 index 00000000..e7cd834a --- /dev/null +++ b/solutions/cncf-generated/atlantis/atlantis-2261-feat-stream-output-for-custom-workflows.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:40.615Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "atlantis: feat: stream output for custom workflows", + "description": "Fixes #2054 \n\nI noticed that in #1937 which originally added this feature (e.g. https://github.com/runatlantis/atlantis/pull/1937/files#diff-edf527ba8643ff7bfca5f560491ea7055af472f5d6f3bbda127f1776b63d4b06L179) that the documentation around setting up `terragrunt` for custom workflows removed the `-no-color` option.\n\nI'm not sure if this was by mistake, but to allow colorization in the in-browser terminal I've added `ansi.Strip()` for parsing all command output from the new runner abstraction. If this is out of scope I can back those changes out (but we should probably update the docs to re-add the `-no-color` flags to `terragrunt` workflows, otherwise the output is mangled in e.g. 
GitHub comments).\n\nTested by updating my atlantis instance to a build of this branch and removing the `-no-color` flags from my workflow.\n\nI'm using the following server configuration:\n\n```yaml\nrepos:\n - id: \"/.*/\"\n workflow: terragrunt\n pre_workflow_hooks:\n - run: >\n terragrunt-atlantis", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "you could check : https://github.com/runatlantis/atlantis/pull/1751 and https://github.com/runatlantis/atlantis/pull/1325 or ask the contributors.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "repos:\r\n - id: \"/.*/\"\r\n workflow: terragrunt\r\n pre_workflow_hooks:\r\n - run: >\r\n terragrunt-atlantis-config generate\r\n --output atlantis.yaml\r\n --autoplan --automerge\r\nworkflows:\r\n terragrunt:\r\n plan:\r\n steps:\r\n - env:\r\n name: TERRAGRUNT_TFPATH\r\n command: 'echo \"terraform${ATLANTIS_TERRAFORM_VERSION}\"'\r\n - run: terragrunt plan -out=$PLANFILE\r\n - run: terragrunt show -json $PLANFILE > $SHOWFILE\r\n apply:\r\n steps:\r\n - env:\r\n name: TERRAGRUNT_TFPATH\r\n command: 'echo \"terraform${ATLANTIS_TERRAFORM_VERSION}\"'\r\n - run: terragrunt apply $PLANFILE", + "version: 3\r\nautomerge: true\r\ndelete_source_branch_on_merge: true\r\nprojects:\r\n - name: aibse\r\n dir: teams/aibse\r\n workflow: terragrunt\r\n terraform_version: v1.2.2\r\n autoplan:\r\n when_modified: [\"*.tf\", \"*.hcl\"]\r\n enabled: true\r\n - name: platform\r\n dir: teams/platform\r\n workflow: terragrunt\r\n terraform_version: v1.2.2\r\n autoplan:\r\n when_modified: [\"*.tf\", \"*.hcl\"]\r\n enabled: true\r\nworkflows:\r\n terragrunt:\r\n plan:\r\n steps:\r\n - env:\r\n name: TERRAGRUNT_TFPATH\r\n command: 'echo \"terraform${ATLANTIS_TERRAFORM_VERSION}\"'\r\n - run: terragrunt plan -out=$PLANFILE\r\n - run: terragrunt show -json $PLANFILE > $SHOWFILE\r\n apply:\r\n steps:\r\n - env:\r\n name: 
TERRAGRUNT_TFPATH\r\n command: 'echo \"terraform${ATLANTIS_TERRAFORM_VERSION}\"'\r\n - run: terragrunt apply $PLANFILE", + "# repos lists the config for specific repos.\r\nrepos:\r\n # id can either be an exact repo ID or a regex.\r\n # If using a regex, it must start and end with a slash.\r\n # Repo ID's are of the form {VCS hostname}/{org}/{repo name}, ex.\r\n # github.com/runatlantis/atlantis.\r\n - id: /.*/\r\n # branch is an regex matching pull requests by base branch\r\n # (the branch the pull request is getting merged into).\r\n # By default, all branches are matched\r\n branch: main\r\n # apply_requirements sets the Apply Requirements for all repos that match.\r\n apply_requirements: [mergeable, undiverged]\r\n # workflow sets the workflow for all repos that match.\r\n # This workflow must be defined in the workflows section.\r\n workflow: default\r\n # allowed_overrides specifies which keys can be overridden by this repo in\r\n # its atlantis.yaml file.\r\n allowed_overrides: [apply_requirements, workflow, delete_source_branch_on_merge]\r\n # allowed_workflows specifies which workflows the repos that match\r\n # are allowed to select.\r\n allowed_workflows: [default]\r\n # allow_custom_workflows defines whether this repo can define its own\r\n # workflows. 
If false (default), the repo can only use server-side defined\r\n # workflows.\r\n allow_custom_workflows: true\r\n # delete_source_branch_on_merge defines whether the source branch would be deleted on merge\r\n # If false (default), the source branch won't be deleted on merge\r\n delete_source_branch_on_merge: false\r\n # pre_workflow_hooks defines arbitrary list of scripts to execute before workflow execution.\r\n #pre_workflow_hooks:\r\n # - run: my-pre-workflow-hook-command arg1\r\n # post_workflow_hooks defines arbitrary list of scripts to execute after workflow execution.\r\n #post_workflow_hooks:\r\n # - run: my-post-workflow-hook-command arg1\r\n# workflows lists server-side custom workflows\r\nworkflows:\r\n default:\r\n plan:\r\n steps:\r\n - init\r\n - plan\r\n apply:\r\n steps: [apply]" + ] + } + }, + "metadata": { + "tags": [ + "atlantis", + "sandbox", + "app-definition", + "feature", + "waiting-on-review", + "terragrunt" + ], + "category": "workloads", + "cncfProjects": [ + "atlantis" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/runatlantis/atlantis/pull/2261", + "sourceRepo": "runatlantis/atlantis", + "reactions": 17, + "comments": 25 + }, + "security": { + "scannedAt": "2026-02-27T17:47:40.615Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/atlantis/atlantis-2507-feat-auto-plan-projects-when-modules-change.json b/solutions/cncf-generated/atlantis/atlantis-2507-feat-auto-plan-projects-when-modules-change.json new file mode 100644 index 00000000..3933463e --- /dev/null +++ b/solutions/cncf-generated/atlantis/atlantis-2507-feat-auto-plan-projects-when-modules-change.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:51.655Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "atlantis: feat: auto-plan projects when modules 
change", + "description": "Fixes #920\n\nSee [testdata](https://github.com/runatlantis/atlantis/pull/2507/files#diff-f2a614e806f20051c955922d44c5602829e844794bfcd938e5d91d35cdfae172) for an example repo structure. Any project that uses modules in the same repo should be able to take advantage. \n\nOnly files that are not considered part of a project will be considered. With the current auto-plan heuristic, only changed files in `/modules/` without a `main.tf` would trigger a check of the modules dependencies. \n\nAdds `--autoplan-modules` and `--autoplan-modules-from-projects`", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@iamnoah please merge from the default branch to fix the `branch` test failure", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "atlantis", + "sandbox", + "app-definition", + "bug", + "waiting-on-review" + ], + "category": "workloads", + "cncfProjects": [ + "atlantis" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/runatlantis/atlantis/pull/2507", + "sourceRepo": "runatlantis/atlantis", + "reactions": 9, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:47:51.655Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/atlantis/atlantis-3086-feat-policies-add-granular-policy-sets.json b/solutions/cncf-generated/atlantis/atlantis-3086-feat-policies-add-granular-policy-sets.json new file mode 100644 index 00000000..7832f237 --- /dev/null +++ b/solutions/cncf-generated/atlantis/atlantis-3086-feat-policies-add-granular-policy-sets.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:50.384Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "atlantis: feat(policies): Add granular 
policy_sets", + "description": "## what\n\nThis change updates policy checks to be more granular.\nNamely:\n- When specifying multiple policy_sets, owners of each policy set are considered independently.\n- Top-level owners are able to give approval for all policy sets.\n- Multiple approvals can be configured to be required to pass policy checks. i.e. a policy set can require 2 approvals to pass.\n- Policies to approve can be specified by: project, dir, workspace, or an individual poilicyset.\n- Output from conftest will be placed in the working dir of the project, so it can be used in custom workflow steps. i.e for use to generate metrics to send to a logging platform of choice.\n\nImplementation details:\n\nPolicySet configuration allows for the specifying of owners at the global level and on a per policy set level. Additionally, a new parameter `ReviewCount`, which specifies how many approvals are required to override policy checks, has been added on the same levels. These are both considered when determining if a policy set ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Thanks a lot for your contribution @pseudomorph. This may take some time to review.\n\nIn the mean time, could you use your solution in your own setup and make sure it works as expected? 
That will be one of the best tests you can do while we review it when we have the time.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "atlantis", + "sandbox", + "app-definition", + "docs", + "go", + "needs-tests", + "waiting-on-review" + ], + "category": "workloads", + "cncfProjects": [ + "atlantis" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/runatlantis/atlantis/pull/3086", + "sourceRepo": "runatlantis/atlantis", + "reactions": 9, + "comments": 29 + }, + "security": { + "scannedAt": "2026-02-27T17:47:50.384Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/atlantis/atlantis-3269-feat-drift-detection.json b/solutions/cncf-generated/atlantis/atlantis-3269-feat-drift-detection.json new file mode 100644 index 00000000..eaf96f41 --- /dev/null +++ b/solutions/cncf-generated/atlantis/atlantis-3269-feat-drift-detection.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:39.462Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "atlantis: feat: drift detection", + "description": "## what\nWork in progress Implementation of #3245 drift detection feature. It allows users to configure drift detection for specific projects so atlantis can detect drift and create a pull request based on this change. 
Working on:\n\n- [ ] Creation of atlantis.yml parameters\n- [ ] Creation of server parameters\n- [ ] Creation of cron job for drift detection of requested projects\n- [ ] Creation of pull request with any detected drift using token\n - [ ] Github\n - [ ] Gitlab\n - [ ] Bitbucket\n - [ ] Azure Devops\n\n## why\n\nTo support drift detection natively in atlantis\n\n## tests\n\n- [ ] Test atlantis.yml configuration parameters\n- [ ] Test server configuration parameters\n- [ ] Test drift detection job\n\n## references\n\ncloses #3245", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "so far this looks good @motatoes", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "atlantis", + "sandbox", + "app-definition", + "go" + ], + "category": "workloads", + "cncfProjects": [ + "atlantis" + ], + "targetResourceKinds": [ + "Job" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/runatlantis/atlantis/pull/3269", + "sourceRepo": "runatlantis/atlantis", + "reactions": 19, + "comments": 5 + }, + "security": { + "scannedAt": "2026-02-27T17:47:39.462Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/atlantis/atlantis-3287-feat-api-pass-flags-to-plan-and-apply-endpoints.json b/solutions/cncf-generated/atlantis/atlantis-3287-feat-api-pass-flags-to-plan-and-apply-endpoints.json new file mode 100644 index 00000000..cb6ba713 --- /dev/null +++ b/solutions/cncf-generated/atlantis/atlantis-3287-feat-api-pass-flags-to-plan-and-apply-endpoints.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:55.326Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "atlantis: feat(api): pass flags to plan and apply endpoints", + "description": "## what\n- Adding option to add flags to API\n\n## 
why\nused to specify terraform arguments via the API\n\n## tests\n\n
curl tests\n\n_input_\n```\n--data-raw '{\n \"Repository\": \"igaskin/atlantis-demo\",\n \"Ref\": \"igaskin-patch-2\",\n \"Type\": \"Github\",\n \"Paths\": [\n {\n \"Directory\": \".\",\n \"Workspace\": \"staging\",\n \"Flags\": [\n \"-target\",\n \"null_resource.null_resource_simple\"\n ]\n }\n ],\n \"PR\": 6\n}'\n```\n\n_output_\n```\n\nInitializing the backend...\n\nInitializing provider plugins...\n- Finding latest version of hashicorp/null...\n- Using hashicorp/null v3.2.1 from the shared cache directory\n\nTerraform has created a lock file .terraform.lock.hcl to record the provider\nselections it made above. Include this file in your version control repository\nso that Terraform can guarantee to make the same selections by default when\nyou run \"terraform init\" in the future.\n\nTerraform has been succes", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@igaskin could you resolve the test failures ? Given the approvers, once the tests are passing, the pr should be good to merge", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "--data-raw '{\r\n \"Repository\": \"igaskin/atlantis-demo\",\r\n \"Ref\": \"igaskin-patch-2\",\r\n \"Type\": \"Github\",\r\n \"Paths\": [\r\n {\r\n \"Directory\": \".\",\r\n \"Workspace\": \"staging\",\r\n \"Flags\": [\r\n \"-target\",\r\n \"null_resource.null_resource_simple\"\r\n ]\r\n }\r\n ],\r\n \"PR\": 6\r\n}'", + "Initializing the backend...\r\n\r\nInitializing provider plugins...\r\n- Finding latest version of hashicorp/null...\r\n- Using hashicorp/null v3.2.1 from the shared cache directory\r\n\r\nTerraform has created a lock file .terraform.lock.hcl to record the provider\r\nselections it made above. 
Include this file in your version control repository\r\nso that Terraform can guarantee to make the same selections by default when\r\nyou run \"terraform init\" in the future.\r\n\r\nTerraform has been successfully initialized!\r\n\r\nTerraform used the selected providers to generate the following execution\r\nplan. Resource actions are indicated with the following symbols:\r\n + create\r\n\r\nTerraform will perform the following actions:\r\n\r\n # null_resource.null_resource_simple will be created\r\n + resource \"null_resource\" \"null_resource_simple\" {\r\n + id = (known after apply)\r\n }\r\n\r\nPlan: 1 to add, 0 to change, 0 to destroy.\r\n╷\r\n│ Warning: Resource targeting is in effect\r\n│ \r\n│ You are creating a plan with the -target option, which means that the\r\n│ result of this plan may not represent all of the changes requested by the\r\n│ current configuration.\r\n│ \r\n│ The -target option is not for routine use, and is provided only for\r\n│ exceptional situations such as recovering from errors or mistakes, or when\r\n│ Terraform specifically suggests to use it as part of an error message.\r\n╵", + "--data-raw '{\r\n \"Repository\": \"igaskin/atlantis-demo\",\r\n \"Ref\": \"igaskin-patch-2\",\r\n \"Type\": \"Github\",\r\n \"Paths\": [\r\n {\r\n \"Directory\": \".\",\r\n \"Workspace\": \"staging\",\r\n \"Flags\": [\r\n \"-target\",\r\n \"foo\"\r\n ]\r\n }\r\n ],\r\n \"PR\": 6\r\n}'" + ] + } + }, + "metadata": { + "tags": [ + "atlantis", + "sandbox", + "app-definition", + "waiting-on-response", + "docs", + "go", + "needs-tests" + ], + "category": "workloads", + "cncfProjects": [ + "atlantis" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/runatlantis/atlantis/pull/3287", + "sourceRepo": "runatlantis/atlantis", + "reactions": 8, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:47:55.326Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff 
--git a/solutions/cncf-generated/atlantis/atlantis-3607-feat-adding-extra-plan-and-apply-flags-to-api-controller.json b/solutions/cncf-generated/atlantis/atlantis-3607-feat-adding-extra-plan-and-apply-flags-to-api-controller.json new file mode 100644 index 00000000..b2ca6874 --- /dev/null +++ b/solutions/cncf-generated/atlantis/atlantis-3607-feat-adding-extra-plan-and-apply-flags-to-api-controller.json @@ -0,0 +1,53 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:33.122Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "atlantis: feat: adding extra plan and apply flags to api_controller", + "description": "## what\n * Adding option to add flags to API (Test and Rebased from #3287 ) plus more improvements\n \n ## why\n used to specify terraform arguments via the API\n \n ## tests\n\n
\nControl Test:\n\n```\n/usr/local/bin/python3.9 /Users/david.murphy/atlantis_api_test/atlantis_api_base_plan.py \nPlan request sent successfully!\nPayload was:\n{'PR': 1,\n 'Paths': [{'Directory': '.', 'Workspace': 'default'}],\n 'Ref': 'example',\n 'Repository': 'dbmurphy/atlantis-example',\n 'Type': 'Github'}\nTF Command was: atlantis plan -d .\nTF Output\n\nTerraform used the selected providers to generate the following execution\nplan. Resource actions are indicated with the following symbols:\n+ create\n\nTerraform will perform the following actions:\n\n # null_resource.example will be created\n+ resource \"null_resource\" \"example\" {\n + id = (known after apply)\n }\n\n # null_resource.example2 will be created\n+ resource \"null_resource\" \"example2\" {\n + id = (known after apply)\n }\n\nPlan: 2 to a", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "> @GenPage is there something I need to do here to trigger assignments and reviews? The only fail is on vupress messing with deploy test but I see you have a PR about that one.\n\njust requesting the review for maintainers is enough, we will try to review this week and the vuepress error should be fixed after we merge the fix we have.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "/usr/local/bin/python3.9 /Users/david.murphy/atlantis_api_test/atlantis_api_base_plan.py \r\nPlan request sent successfully!\r\nPayload was:\r\n{'PR': 1,\r\n 'Paths': [{'Directory': '.', 'Workspace': 'default'}],\r\n 'Ref': 'example',\r\n 'Repository': 'dbmurphy/atlantis-example',\r\n 'Type': 'Github'}\r\nTF Command was: atlantis plan -d .\r\nTF Output\r\n\r\nTerraform used the selected providers to generate the following execution\r\nplan. 
Resource actions are indicated with the following symbols:\r\n+ create\r\n\r\nTerraform will perform the following actions:\r\n\r\n # null_resource.example will be created\r\n+ resource \"null_resource\" \"example\" {\r\n + id = (known after apply)\r\n }\r\n\r\n # null_resource.example2 will be created\r\n+ resource \"null_resource\" \"example2\" {\r\n + id = (known after apply)\r\n }\r\n\r\nPlan: 2 to add, 0 to change, 0 to destroy.", + "/usr/local/bin/python3.9 /Users/david.murphy/atlantis_api_test/atlantis_api_targeted_plan.py \r\nPlan request sent successfully!\r\nPayload was:\r\n{'PR': 1,\r\n 'Paths': [{'Directory': '.',\r\n 'ExtraFlags': ['-target=null_resource.example2'],\r\n 'Workspace': 'default'}],\r\n 'Ref': 'example',\r\n 'Repository': 'dbmurphy/atlantis-example',\r\n 'Type': 'Github'}\r\nTF Command was: atlantis plan -d . -- -target=null_resource.example2\r\nTF Output\r\n\r\nTerraform used the selected providers to generate the following execution\r\nplan. Resource actions are indicated with the following symbols:\r\n+ create\r\n\r\nTerraform will perform the following actions:\r\n\r\n # null_resource.example2 will be created\r\n+ resource \"null_resource\" \"example2\" {\r\n + id = (known after apply)\r\n }\r\n\r\nPlan: 1 to add, 0 to change, 0 to destroy.\r\n╷\r\n│ Warning: Resource targeting is in effect\r\n│ \r\n│ You are creating a plan with the -target option, which means that the\r\n│ result of this plan may not represent all of the changes requested by the\r\n│ current configuration.\r\n│ \r\n│ The -target option is not for routine use, and is provided only for\r\n│ exceptional situations such as recovering from errors or mistakes, or when\r\n│ Terraform specifically suggests to use it as part of an error message.\r\n╵", + "/usr/local/bin/python3.9 /Users/david.murphy/atlantis_api_test/atlantis_api_bad_plan.py \r\nError occurred: \r\nStatus Code: (500)\r\n{'Error': None,\r\n 'Failure': '',\r\n 'PlansDeleted': False,\r\n 'ProjectResults': 
[{'ApplySuccess': '',\r\n 'Command': 1,\r\n 'Error': {},\r\n 'Failure': '',\r\n 'ImportSuccess': None,\r\n 'PlanSuccess': None,\r\n 'PolicyCheckResults': None,\r\n 'ProjectName': '',\r\n 'RepoRelDir': '.',\r\n 'StateRmSuccess': None,\r\n 'SubCommand': '',\r\n 'VersionSuccess': '',\r\n 'Workspace': 'default'}]}\r\nPayload was:\r\n{'PR': 1,\r\n 'Paths': [{'Directory': '.',\r\n 'ExtraFlags': ['--invalid-flag'],\r\n 'Workspace': 'default'}],\r\n 'Ref': 'example',\r\n 'Repository': 'dbmurphy/atlantis-example',\r\n 'Type': 'Github'}" + ] + } + }, + "metadata": { + "tags": [ + "atlantis", + "sandbox", + "app-definition", + "feature", + "waiting-on-response", + "docs", + "api-endpoints", + "go", + "stale" + ], + "category": "workloads", + "cncfProjects": [ + "atlantis" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/runatlantis/atlantis/pull/3607", + "sourceRepo": "runatlantis/atlantis", + "reactions": 23, + "comments": 16 + }, + "security": { + "scannedAt": "2026-02-27T17:47:33.122Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/atlantis/atlantis-4001-feat-add-support-for-gitlab-groups.json b/solutions/cncf-generated/atlantis/atlantis-4001-feat-add-support-for-gitlab-groups.json new file mode 100644 index 00000000..9c252a50 --- /dev/null +++ b/solutions/cncf-generated/atlantis/atlantis-4001-feat-add-support-for-gitlab-groups.json @@ -0,0 +1,54 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:43.425Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "atlantis: feat: add support for GitLab groups", + "description": "## what\n\nAdd support for GitLab groups in the commmand allowlist and policy owners.\n\nUnlike with GitHub, the GitLab API doesn't allow retrieving all group membership for a given user (unless you have admin access), so here instead we check in each 
configured group if the user is an active member of it.\n\n## why\n\n- To be able to group permissions with GitLab instead of only users\n- Feature parity with GitHub\n\n## tests\n\n- `make test`\n- Configured `ATLANTIS_GITLAB_GROUP_ALLOWLIST` and policy owners with one of my groups and then with a group I'm not a member of instead, then tried an `atlantis plan` and `atlantis approve_policies` in a merge request, and I was respectively allowed and denied in each case as expected.\n\n## references\n\nCloses #2549 #4799", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@jamengual hi, any chance this could make it into the next release? It would be really helpful for us at GitLab, and for all GitLab users. 🙏🏻", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "policies:\r\n policy_sets:\r\n - name: pe_mandatory_policies\r\n path: /atlantis-data/policies/pe_policies\r\n source: local\r\n prevent_self_approve: true\r\n owners:\r\n teams:\r\n - production-engineers" + ] + } + }, + "metadata": { + "tags": [ + "atlantis", + "sandbox", + "app-definition", + "waiting-on-response", + "docs", + "provider-azuredevops", + "go", + "waiting-on-review", + "provider-bitbucket", + "provider-github", + "provider-gitlab", + "lgtm" + ], + "category": "workloads", + "cncfProjects": [ + "atlantis" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/runatlantis/atlantis/pull/4001", + "sourceRepo": "runatlantis/atlantis", + "reactions": 16, + "comments": 36 + }, + "security": { + "scannedAt": "2026-02-27T17:47:43.425Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/atlantis/atlantis-4114-feat-drift-detection-wip.json b/solutions/cncf-generated/atlantis/atlantis-4114-feat-drift-detection-wip.json new file mode 100644 index 00000000..441b35a5 --- 
/dev/null +++ b/solutions/cncf-generated/atlantis/atlantis-4114-feat-drift-detection-wip.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:35.444Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "atlantis: feat:drift detection (wip)", + "description": "(Still a work in progress PR, not ready for review yet, but you are welcome to have a look :))\n\n## what\nWork in progress Implementation of #3245 drift detection feature. It allows users to configure drift detection for specific projects so atlantis can detect drift and create a pull request based on this change. Working on:\n\n- [ ] Creation of atlantis.yml parameters\n- [ ] Creation of server parameters\n- [ ] Creation of cron job for drift detection of requested projects\n- [ ] Creation of pull request with any detected drift using token\n - [ ] Github\n - [ ] Gitlab\n - [ ] Bitbucket\n - [ ] Azure Devops\n\n## why\n\nTo support drift detection natively in atlantis\n\n## tests\n\n- [ ] Test atlantis.yml configuration parameters\n- [ ] Test server configuration parameters\n- [ ] Test drift detection job\n\n## references\n\n- closes #3245", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## what\nWork in progress Implementation of #3245 drift detection feature. It allows users to configure drift detection for specific projects so atlantis can detect drift and create a pull request based on this change. 
Working on:\n\n- [ ] Creation of atlantis.yml parameters\n- [ ] Creation of server parameters\n- [ ] Creation of cron job for drift detection of requested projects\n- [ ] Creation of pull request with any detected drift using token\n - [ ] Github\n - [ ] Gitlab\n - [ ] Bitbucket\n - [ ] Azure Devops\n\n## why\n\nTo support drift detection natively in atlantis\n\n## tests\n\n- [ ] Test atlantis.yml configuration parameters\n- [ ] Test server configuration parameters\n- [ ] Test drift detection job\n\n## references\n\ncloses #3245", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "atlantis", + "sandbox", + "app-definition", + "go" + ], + "category": "workloads", + "cncfProjects": [ + "atlantis" + ], + "targetResourceKinds": [ + "Job" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/runatlantis/atlantis/pull/4114", + "sourceRepo": "runatlantis/atlantis", + "reactions": 21, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:47:35.445Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/atlantis/atlantis-4193-fix-github-mergeability-bypassing-apply.json b/solutions/cncf-generated/atlantis/atlantis-4193-fix-github-mergeability-bypassing-apply.json new file mode 100644 index 00000000..42d9d730 --- /dev/null +++ b/solutions/cncf-generated/atlantis/atlantis-4193-fix-github-mergeability-bypassing-apply.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:37.313Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "atlantis: fix: GitHub mergeability bypassing apply", + "description": "## what\n\nFirst of all, this PR is only concerned with how Atlantis determines if it should proceed with apply when the `--gh-allow-mergeable-bypass-apply` is used on a GitHub PR that is in the 
`blocked` state because Atlantis apply is a required check.\n\nThe following changes to its behaviour are made:\n\n- Distinguish between required and non-required statuses\n- Determine if a check is required by a rulesets and/or branch protection\n - Previously only branch protection was considered\n- Prevent apply if there are required checks in pending or queued state\n - Solution to this issue https://github.com/runatlantis/atlantis/issues/3811\n- Identify all required checks set by branch protections and rulesets and all required workflows set by rulesets\n - Solution to this issue https://github.com/runatlantis/atlantis/issues/4272\n\nThis is all accomplished by using the GraphQL API instead of the REST API, because the former conveniently provides the current state of all statuses and checks on the ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## what\n\nCurrently when the apply condition is set to mergeable along with `--gh-allow-mergeable-bypass-apply` flag, Atlantis considers non required status checks as well to decide whether the PR is mergeable or not (to allow apply or not). This is a deviation from standard github mergeable status.\n\nThis PR changes that and includes only required status checks while evaluating mergeable status.\n\n## why\n\nAtlantis uses mergeable status from github API to check if a PR is mergeable if `--gh-allow-mergeable-bypass-apply` is not set. That is if the status is `unstable`, the PR is considered mergeable.\n\n```\nunstable: Failing/pending commit status that is not part of the required\n\t// status checks. Merging is allowed (yellow box).\n```\n\nIf `--gh-allow-mergeable-bypass-apply` enabled, github uses some additional checks to see if the PR is mergeable without considering `atlantis apply`. But in these additional checks, Atlantis looks at all statuses irrespective of if they are required or not. 
So, if there is a status check that is failing Atlantis will consider it non mergeable even if its not a required status check and thus will block Apply.\n\nIn this change, while checking if the state is `success`, it will also check if the particular status is one of the required checks. Hence Atlantis will report non mergeable only if the failing status check is a required one.\n\n## tests\n\n- [x] I have adjusted the test data to include scenarios where there could be failing non require", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "unstable: Failing/pending commit status that is not part of the required\r\n\t// status checks. Merging is allowed (yellow box).", + "\"msg\":\"unable to get pull request status: fetching mergeability status for repo: , and pull number: 395: getting pull request status: fetching rulesets, branch protections and status checks from GraphQL: Resource not accessible by integration. 
Continuing with mergeable and approved assumed false\"," + ] + } + }, + "metadata": { + "tags": [ + "atlantis", + "sandbox", + "app-definition", + "go", + "needs-discussion", + "waiting-on-review", + "provider-github" + ], + "category": "workloads", + "cncfProjects": [ + "atlantis" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/runatlantis/atlantis/pull/4193", + "sourceRepo": "runatlantis/atlantis", + "reactions": 21, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:47:37.313Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/atlantis/atlantis-4229-feat-add-gitea-support.json b/solutions/cncf-generated/atlantis/atlantis-4229-feat-add-gitea-support.json new file mode 100644 index 00000000..57dbbda7 --- /dev/null +++ b/solutions/cncf-generated/atlantis/atlantis-4229-feat-add-gitea-support.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:49.187Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "atlantis: feat: Add Gitea support", + "description": "## what\n\nAddition of Gitea client in order to close #3538\n\nA functionally complete implementation of a client for Gitea, with the sole exception being the listing of teams that a user is a member of. However, that's not uncommon for other clients.\n\nIt was tested manually against both a Gitea and a Forgejo install. Automated tests were limited to the basics. 
A large part of the implementation uses the Gitea SDK.\n\n## why\n\nSee #3538\n\n## references\n\n- Closes #3538 \n- Please test `ghcr.io/mvdkleijn/atlantis:dev`\n\n## testing\n\n- TestExecute_ValidateVCSConfig\n- TestExecute_GiteaUser\n- TestExecute_GiteaBaseURLScheme\n- TestExecute_GiteaWithWebhookSecret\n- TestExecute_GiteaBaseURLPort\n\nAlso: manual testing by @florianbeisel and @mvdkleijn against Gitea and Forgejo", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Hi all. The pr says this is a draft and no tests. Is that accurate? If not, could you update this? Tests are always nice when adding a new feature as the feature could break in the future and tests would prevent merging a follow up breaking PR.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "atlantis", + "sandbox", + "app-definition", + "feature", + "docs", + "dependencies", + "go", + "provider-gitea" + ], + "category": "workloads", + "cncfProjects": [ + "atlantis" + ], + "targetResourceKinds": [ + "Secret" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/runatlantis/atlantis/pull/4229", + "sourceRepo": "runatlantis/atlantis", + "reactions": 11, + "comments": 51 + }, + "security": { + "scannedAt": "2026-02-27T17:47:49.187Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/atlantis/atlantis-4275-feat-bitbucket-cloud-add-support-for-webhook-secrets.json b/solutions/cncf-generated/atlantis/atlantis-4275-feat-bitbucket-cloud-add-support-for-webhook-secrets.json new file mode 100644 index 00000000..1a1b9443 --- /dev/null +++ b/solutions/cncf-generated/atlantis/atlantis-4275-feat-bitbucket-cloud-add-support-for-webhook-secrets.json @@ -0,0 +1,53 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:54.277Z", + "exportedBy": "cncf-mission-generator", + 
"consoleVersion": "auto-generated", + "mission": { + "title": "atlantis: feat: BitBucket Cloud: add support for webhook secrets", + "description": "## what\n\nAdded support for Bitbucket Cloud Webhook secrets. Bitbucket added support in the cloud offering in October of 2023 [Announcement](https://bitbucket.org/blog/enhanced-webhook-security)\n\n* Ported the request validation libraries from bitbucket server to bitbucket cloud client.\n* Removed error check and associated test to ensure they aren't being used because they weren't supported\n\n## why\n\nThere is an open request for this #3909 and I'm also in need of this feature.\n\n## tests\n\nI ran make test, the logic is the same as bitbucketserver.\n\n## references\n\n- Fixes #3909", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "kindly get this change merged. we are in very much need for this feature.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "server/controllers/events/events_controller.go:235:54:\r\n printf: non-constant format string in call to (*github.com/runatlantis/atlantis/server/controllers/events.VCSEventsController).respond (govet)" + ] + } + }, + "metadata": { + "tags": [ + "atlantis", + "sandbox", + "app-definition", + "waiting-on-response", + "docs", + "go", + "provider-bitbucket", + "security", + "lgtm" + ], + "category": "workloads", + "cncfProjects": [ + "atlantis" + ], + "targetResourceKinds": [ + "Secret" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/runatlantis/atlantis/pull/4275", + "sourceRepo": "runatlantis/atlantis", + "reactions": 8, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:47:54.277Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/atlantis/atlantis-4499-feat-opentofu-support.json 
b/solutions/cncf-generated/atlantis/atlantis-4499-feat-opentofu-support.json new file mode 100644 index 00000000..293b1ccb --- /dev/null +++ b/solutions/cncf-generated/atlantis/atlantis-4499-feat-opentofu-support.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:38.452Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "atlantis: feat: OpenTofu support", + "description": "## what\n\nThis is a change to get OpenTofu to work with Atlantis:\n- Introduces a `--tf-distribution` setting that can be set to `terraform` or `opentofu`.\n- OpenTofu is downloaded via tofudl.\n- Added an interface to `server/core/terraform` package for the Terraform distribution. This matches up with the `--tf-distribution` setting.\n- I would like to also support configuring Terraform or OpenTofu per project in the server side config or repo level config. This would support an easier path to migrate between the two.\n\n## why\n\n#3741\n\n## tests\n\n- Have refactored out a distribution and made changes to the downloaders for both Terraform and Conftest to be more testable\n- I've deployed this branch within my org, and have got successful plan and applies working\n\n## references\n\n- Closes #3741", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Due to wip status, I switched this to a draft as a non draft means that it's ready for review. Please set it as ready to review when ready. 
\n\nThank you for the contribution", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "func (c *DefaultClient) DetectVersion(log logging.SimpleLogging, projectDirectory string) *version.Version {\r\n\treturn c.distribution.DetectVersion(log, c, projectDirectory)\r\n}\r\n\r\ntype Distribution interface {\r\n\tBinName() string\r\n\tSourceURL(v *version.Version, downloadURL string) string\r\n\tDetectVersion(log logging.SimpleLogging, c *DefaultClient, projectDirectory string) *version.Version\r\n}\r\n\r\nfunc (*DistributionTerraform) DetectVersion(log logging.SimpleLogging, c DefaultClient, projectDirectory string) *version.Version {\r\n// use hc-install\r\n}\r\n\r\nfunc (*DistributionOpenTofu) ListAvailableVersions(log logging.SimpleLogging, downloadBaseURL string, downloadAllowed bool) ([]string, error) {\r\n// use current implementation - only called by the DetectVersion below\r\n}\r\n\r\nfunc (dt *DistributionOpenTofu) DetectVersion(log logging.SimpleLogging, c DefaultClient, projectDirectory string) *version.Version {\r\n// use current implementation\r\ne.g. 
tfVersions, err := dt.ListAvailableVersions(log, c.downloadBaseURL, c.downloadAllowed)\r\n}" + ] + } + }, + "metadata": { + "tags": [ + "atlantis", + "sandbox", + "app-definition", + "feature", + "docs", + "dependencies", + "go", + "waiting-on-review", + "lgtm" + ], + "category": "workloads", + "cncfProjects": [ + "atlantis" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/runatlantis/atlantis/pull/4499", + "sourceRepo": "runatlantis/atlantis", + "reactions": 20, + "comments": 17 + }, + "security": { + "scannedAt": "2026-02-27T17:47:38.452Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/atlantis/atlantis-4978-feat-add-ignore-vcs-status-names-github-only.json b/solutions/cncf-generated/atlantis/atlantis-4978-feat-add-ignore-vcs-status-names-github-only.json new file mode 100644 index 00000000..b03320d5 --- /dev/null +++ b/solutions/cncf-generated/atlantis/atlantis-4978-feat-add-ignore-vcs-status-names-github-only.json @@ -0,0 +1,56 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:44.557Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "atlantis: feat: add ignore-vcs-status-names (github only)", + "description": "## what\n\nComma separated list of VCS status names from other atlantis services.\n\nWhen `gh-allow-mergeable-bypass-apply` is true, will ignore status checks (e.g. 
`status1/plan`, `status1/apply`, `status2/plan`, `status2/apply`) from other atlantis services when checking if the PR is mergeable.\n\nOnly for Github but can be extended to other VCS in the future.\n\n## why\n\nThis PR is a proposal to close #2848 \n\n## tests\n\n~~I have only run `make test` but planning on testing in my own environment if the proposal is accepted.~~\nTested with `make test` and on my environment with `bappr/atlantis:cud-ignore-vcs-status-names-ca1171e0dd174edbbd63f8f6ae560caa6af4c0c7-0`.\n\n## references\n\ncloses #2848", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "interesting proposition.\none quick thing I will say that instead of adding a flag for this it will be much better to add it to the server side repo config with something like : \n```yaml\nvcs-status-to-ignore:\n - name1\n - name2\n - etc\n ```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "vcs-status-to-ignore:\r\n - name1\r\n - name2\r\n - etc" + ] + } + }, + "metadata": { + "tags": [ + "atlantis", + "sandbox", + "app-definition", + "feature", + "waiting-on-response", + "docs", + "provider-azuredevops", + "go", + "provider-bitbucket", + "provider-github", + "provider-gitlab", + "lgtm" + ], + "category": "workloads", + "cncfProjects": [ + "atlantis" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/runatlantis/atlantis/pull/4978", + "sourceRepo": "runatlantis/atlantis", + "reactions": 15, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:47:44.557Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/atlantis/atlantis-5940-feat-add-bitbucket-cloud-api-user-flag.json b/solutions/cncf-generated/atlantis/atlantis-5940-feat-add-bitbucket-cloud-api-user-flag.json new file mode 100644 index 00000000..4f2926b9 
--- /dev/null +++ b/solutions/cncf-generated/atlantis/atlantis-5940-feat-add-bitbucket-cloud-api-user-flag.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:46.522Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "atlantis: feat: add bitbucket cloud api-user flag", + "description": "## what\n\n- Adds bitbucket-api-user flag for the Bitbucket Cloud client, keeping bitbucket-user just for git operations. By default and for backward compatibility, if not bitbucket-api-user is provided, it uses the bitbucket-user flag.\n\n## why\n\nBitbucket Cloud deprecated App Password authentication, which previously supported the same user for both API calls and Git operations. See https://github.com/runatlantis/atlantis/issues/5696 \n\n## tests\nWith the new flag:\n`./atlantis server --bitbucket-user '' --bitbucket-api-user '' --bitbucket-token '' --repo-allowlist '*' --log-level info`\n\n`{\"level\":\"info\",\"ts\":\"2025-11-07T10:54:04.448-0300\",\"caller\":\"server/server.go:343\",\"msg\":\"Supported VCS Hosts: BitbucketCloud\",\"json\":{}}\n{\"level\":\"info\",\"ts\":\"2025-11-07T10:54:04.814-0300\",\"caller\":\"server/server.go:504\",\"msg\":\"Utilizing BoltDB\",\"json\":{}}\n{\"level\":\"info\",\"ts\":\"2025-11-07T10:54:04.827-0300\",\"caller\":\"policy/conftest_client.go:168\",\"msg\":\"failed to get defa", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## what\n\nSeparates out `email` from `user` for bitbucket.\n\n## why\n\nMy understanding of #5696 is that there has to be a separate \"username\" from \"email\" address in the new bitbucket authentication scheme, so I added a flag to tease that out:\n\n```\natlantis % go run main.go server --bitbucket-user foo --bitbucket-token bar --repo-allowlist='hi' \nError: --bitbucket-email must be specified alongside --bitbucket-user\nexit status 1\natlantis % go run main.go server --bitbucket-user foo 
--bitbucket-token bar --repo-allowlist='hi' --bitbucket-email=foo@bar\n{\"level\":\"info\",\"ts\":\"2025-10-30T23:06:24.009-0400\",\"caller\":\"server/server.go:345\",\"msg\":\"Supported VCS Hosts: BitbucketCloud\",\"json\":{}}\n```\n\nDISCLAIMER: I've never used bitbucket before, and am just going off the description of a problem in #5696 to try to help out.\n\n## tests\n\nTODO: add tests\n\nAlso need to update documentation\n\n## references\n\ncloses: #5696", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "atlantis % go run main.go server --bitbucket-user foo --bitbucket-token bar --repo-allowlist='hi' \r\nError: --bitbucket-email must be specified alongside --bitbucket-user\r\nexit status 1\r\natlantis % go run main.go server --bitbucket-user foo --bitbucket-token bar --repo-allowlist='hi' --bitbucket-email=foo@bar\r\n{\"level\":\"info\",\"ts\":\"2025-10-30T23:06:24.009-0400\",\"caller\":\"server/server.go:345\",\"msg\":\"Supported VCS Hosts: BitbucketCloud\",\"json\":{}}" + ] + } + }, + "metadata": { + "tags": [ + "atlantis", + "sandbox", + "app-definition", + "feature", + "docs", + "go", + "provider-bitbucket", + "lgtm" + ], + "category": "workloads", + "cncfProjects": [ + "atlantis" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/runatlantis/atlantis/pull/5940", + "sourceRepo": "runatlantis/atlantis", + "reactions": 15, + "comments": 0 + }, + "security": { + "scannedAt": "2026-02-27T17:47:46.522Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/backstage/backstage-11173-scaffolder-add-dry-run-template-editor.json b/solutions/cncf-generated/backstage/backstage-11173-scaffolder-add-dry-run-template-editor.json new file mode 100644 index 00000000..bb4d12ad --- /dev/null +++ 
b/solutions/cncf-generated/backstage/backstage-11173-scaffolder-add-dry-run-template-editor.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:42.712Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "backstage: scaffolder: add dry-run + Template Editor", + "description": "## Hey, I just made a Pull Request!\n\nThis adds a new `/v2/dry-run` endpoint to the scaffolder backend. It receives a serialized template, including local files, and returns a serialized result of running the template. The execution happens directly in the scaffolder backend and unlike regular scaffolder task jobs it's synchronous, without streaming of logs etc.\n\nTo support the dry-run endpoint, actions can now declare whether they support dry-run or not, with not supporting it being the default. An action that supports dry-run is responsible for checking `ctx.isDryRun` on the handler context and to behave accordingly.\n\nIn addition to the dry-run endpoint in the backend, this adds a new template editor to the frontend. It builds upon the existing form preview, but uses the web directory access API to load in templated from the local filesystem for editing. This lets users both edit the entire template as well as template contents, as well as trying it out in practice using the new dry-r", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@dotressel Thank you! No docs beyond the pretty slim API surface. What kind of docs would you be interested in? 
For integrators and how to build out dry-running capabilities, or something more end-user oriented like where to find the template editor and how to use it?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "backstage", + "incubating", + "app-definition", + "area-scaffolder" + ], + "category": "workloads", + "cncfProjects": [ + "backstage" + ], + "targetResourceKinds": [ + "Job" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/backstage/backstage/pull/11173", + "sourceRepo": "backstage/backstage", + "reactions": 25, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:45:42.713Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/backstage/backstage-14642-allow-me-to-introduce-the-devtools-plugin.json b/solutions/cncf-generated/backstage/backstage-14642-allow-me-to-introduce-the-devtools-plugin.json new file mode 100644 index 00000000..85a7de89 --- /dev/null +++ b/solutions/cncf-generated/backstage/backstage-14642-allow-me-to-introduce-the-devtools-plugin.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:43.775Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "backstage: Allow me to introduce the DevTools plugin! 🧰 ", + "description": "## Hey, I just made a Pull Request!\n\nAllow me to introduce the DevTools plugin - another handy tool to add to your Backstage 🧰 ! 
This will close #9737 and hopefully open a world of new features to help Backstage Adopters who run the Ops side of things 😉 \n\nThe plugin offers the following features:\n\nLists info about your current Backstage instance - OS, NodeJS version, Backstage version and packages:\n\n![devtools-info-tab](https://user-images.githubusercontent.com/24488346/201972417-4a3507be-fe5c-40df-b027-1ae15e11e4c5.png)\n\nLists the config being used by your current Backstage instance:\n\n![devtools-config-tab](https://user-images.githubusercontent.com/24488346/201972459-bda0c98e-743b-4c42-987d-ba6c08d123c5.png)\n\nThere's also an optional tab where you can list connectivity with External Dependencies:\n\n![devtools-external-dependencies](https://user-images.githubusercontent.com/24488346/201972525-da0fb142-9f45-41d8-8735-65028608e4e3.png)\n\nThe plugin has also been designed so you can custo", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "That CI check is new - you can just run `yarn dedupe` to get it clean again. 
Soon we will have a precommit hook that does it for you transparently #14663", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "backstage", + "incubating", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "backstage" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/backstage/backstage/pull/14642", + "sourceRepo": "backstage/backstage", + "reactions": 22, + "comments": 32 + }, + "security": { + "scannedAt": "2026-02-27T17:45:43.775Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/backstage/backstage-14718-introducing-the-linguist-plugin.json b/solutions/cncf-generated/backstage/backstage-14718-introducing-the-linguist-plugin.json new file mode 100644 index 00000000..2e2f5074 --- /dev/null +++ b/solutions/cncf-generated/backstage/backstage-14718-introducing-the-linguist-plugin.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:58.437Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "backstage: Introducing the Linguist plugin", + "description": "## Hey, I just made a Pull Request!\n\nIntroducing the Linguist plugin: get a nice breakdown of all the languages that make up an Entity in the Catalog! This is great for those SCMs that don't offer this feature like Azure DevOps or BitBucket, but it still works with the others as well. Closes #9733\n\n![linguist-with-data](https://user-images.githubusercontent.com/24488346/202921297-39b4e166-6f9e-4c3a-a917-b88d2df7d350.png)\n\n#### :heavy_check_mark: Checklist\n\n- [x] A changeset describing the change and affected packages. 
([more info](https://github.com/backstage/backstage/blob/master/CONTRIBUTING.md#creating-changesets))\n- [x] Added or updated documentation\n- [ ] Tests for new functionality and regression tests for bug fixes\n- [x] Screenshots attached (for UI changes)\n- [x] All your commits have a `Signed-off-by` line in the message. ([more info](https://github.com/backstage/backstage/blob/master/CONTRIBUTING.md#developer-certificate-of-origin))", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@ahhhndre - Just checking in on the status: Is this still a Draft or should we mark it as \"Ready for review\"? (I know @freben already kind of reviewed it anyway :D)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "backstage", + "incubating", + "app-definition", + "area-catalog" + ], + "category": "workloads", + "cncfProjects": [ + "backstage" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/backstage/backstage/pull/14718", + "sourceRepo": "backstage/backstage", + "reactions": 8, + "comments": 21 + }, + "security": { + "scannedAt": "2026-02-27T17:45:58.437Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/backstage/backstage-15417-use-default-when-no-mkdocs-config-found.json b/solutions/cncf-generated/backstage/backstage-15417-use-default-when-no-mkdocs-config-found.json new file mode 100644 index 00000000..b5d824b6 --- /dev/null +++ b/solutions/cncf-generated/backstage/backstage-15417-use-default-when-no-mkdocs-config-found.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:59.700Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "backstage: Use default when no MkDocs config found", + "description": "Signed-off-by: Andre Wanlin 
<67169551+awanlin@users.noreply.github.com>\n\n## Hey, I just made a Pull Request!\n\nThis adds support for using a basic default `mkdocs.yml` configuration file when none is found.\n\nCloses #15413\n\n#### :heavy_check_mark: Checklist\n\n- [x] A changeset describing the change and affected packages. ([more info](https://github.com/backstage/backstage/blob/master/CONTRIBUTING.md#creating-changesets))\n- [ ] Added or updated documentation\n- [x] Tests for new functionality and regression tests for bug fixes\n- [ ] Screenshots attached (for UI changes)\n- [x] All your commits have a `Signed-off-by` line in the message. ([more info](https://github.com/backstage/backstage/blob/master/CONTRIBUTING.md#developer-certificate-of-origin))", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@agentbellnorm @realandersn I could use your help. I'm trying to do some final testing and I'm having a hard time getting `techdocs-cli serve` to work, I always get a 404. The steps I'm taking are as follows:\n\n1. From the root of the project run: `yarn workspace @techdocs/cli build`\n2. Navigate to using this command: `cd packages/techdocs-cli/src/example-docs`\n3. Now run: `../../../../packages/techdocs-cli/bin/techdocs-cli serve --no-docker`\n\nYou'll see some output about it starting and then in a browser all I get is a 404 message. Running `techdocs-cli serve:mkdocs` works perfectly so a little confused here. 
I've been following the instructions from here: https://github.com/backstage/backstage/tree/master/packages/techdocs-cli#running", + "steps": [ + "From the root of the project run: `yarn workspace @techdocs/cli build`", + "Navigate to using this command: `cd packages/techdocs-cli/src/example-docs`", + "Now run: `../../../../packages/techdocs-cli/bin/techdocs-cli serve --no-docker`" + ] + } + }, + "metadata": { + "tags": [ + "backstage", + "incubating", + "app-definition", + "area-techdocs" + ], + "category": "workloads", + "cncfProjects": [ + "backstage" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/backstage/backstage/pull/15417", + "sourceRepo": "backstage/backstage", + "reactions": 7, + "comments": 16 + }, + "security": { + "scannedAt": "2026-02-27T17:45:59.700Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/backstage/backstage-20598-migrate-to-react-18.json b/solutions/cncf-generated/backstage/backstage-20598-migrate-to-react-18.json new file mode 100644 index 00000000..5985063b --- /dev/null +++ b/solutions/cncf-generated/backstage/backstage-20598-migrate-to-react-18.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:54.656Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "backstage: Migrate to React 18", + "description": "## Hey, I just made a Pull Request!\n\nCloses backstage/backstage#12252\n\nThis switches all packages to React 18. The largest chunk of this work is updates to support the new version of `@testing-library/react` that only supports React 18. Unfortunately there doesn't seem to be a sane way to partially migrate apps due to TLR only supporting one of either React 17 or 18 at a time.\n\nMigration docs to follow assuming this all works out. 
Couple of notes I've taken so far regarding the test changes:\n\n- Seems to be better to call `await act(async () => {})` after render rather than around\n- Waiting for mock functions to be called and then expecting render state to be updated is no longer reliable\n- renderHook no longer forwards initial props to wrapper\n- Rendered components often don't immediately update on user input, it's more important to use `act` now\n\nOverall I was pretty pleased with the type of breakages I encountered in tests. Most breakages was due existing tests not being explicit abo", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "[![Mend Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com)\n\nThis PR contains the following updates:\n\n| Package | Change | Age | Adoption | Passing | Confidence |\n|---|---|---|---|---|---|\n| [@testing-library/react](https://togithub.com/testing-library/react-testing-library) | [`^12.1.3` -> `^14.0.0`](https://renovatebot.com/diffs/npm/@testing-library%2freact/12.1.5/14.0.0) | [![age](https://developer.mend.io/api/mc/badges/age/npm/@testing-library%2freact/14.0.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/@testing-library%2freact/14.0.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/@testing-library%2freact/12.1.5/14.0.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/@testing-library%2freact/12.1.5/14.0.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) |\n\n---\n\n### ⚠ Dependency Lookup Warnings ⚠\n\nWarnings were logged while processing this repo. Please check the logs for more information.\n\n---\n\n### Release Notes\n\n
\ntesting-library/react-testing-library (@​testing-library/react)\n\n### [`v14.0.0`](https://togithub.com/testing-library/react-testing-library/releases/tag/v14.0.0)\n\n[Compare Source](https://togithub.com/testing-library/react-testing-library/comp", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "backstage", + "incubating", + "app-definition", + "area-catalog", + "area-techdocs", + "area-scaffolder", + "area-permission", + "area-discoverability" + ], + "category": "workloads", + "cncfProjects": [ + "backstage" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/backstage/backstage/pull/20598", + "sourceRepo": "backstage/backstage", + "reactions": 10, + "comments": 4 + }, + "security": { + "scannedAt": "2026-02-27T17:45:54.657Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/backstage/backstage-2637-cost-insights-plugin.json b/solutions/cncf-generated/backstage/backstage-2637-cost-insights-plugin.json new file mode 100644 index 00000000..a003c5f1 --- /dev/null +++ b/solutions/cncf-generated/backstage/backstage-2637-cost-insights-plugin.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:45.326Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "backstage: Cost Insights Plugin", + "description": "This PR adds Spotify's Cost Insights Tool. Cost Insights explains costs from cloud services in an understandable way, using software terms familiar to your engineers. 
This tool helps you and your team make trade-offs between cost optimization efforts and your other priorities.\n\nCost Insights features:\n\n- Daily cost graph by team or billing account\n- Cost comparison against configurable business metrics\n- Insights panels for configurable cloud products your company uses\n- Cost alerts and recommendations\n- Selectable time periods for month over month, or quarter over quarter cost comparison\n- Conversion of cost growth into average engineer cost (configurable) to help optimization trade-off decisions\n\n![plugin-cost-insights](https://user-images.githubusercontent.com/3030003/94430416-e166d380-0161-11eb-891c-9ce10187683e.gif)\n\nThis PR adds the Cost Insights frontend React plugin with a defined `CostInsightsApi`. We include an example client with static data in the expected format. This API", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## Hey, I just made a Pull Request!\n\nI'm going to add changesets that are missing since the last release. 
I'm starting with a draft PR so those who're interested can track the progress.\n\n## TODO\n\nThis list was generated with `git log b5e6051..HEAD | grep \"Merge pull request\"`\n\n* [x] #2722 from SDA-SE/feat/async-api-react-update -> https://github.com/spotify/backstage/pull/2724/commits/ac66bb38b0b40b7e5a56b5a40cbf28fa7fc0891d\n* [x] #2720 from SDA-SE/feat/correct-readme\n* [x] #2716 from spotify/rugvip/nightly-fix\n* [x] #2700 from spotify/benjdlambert-patch-2\n* [x] #2701 from spotify/rugvip/trouble\n* [x] #2698 from spotify/emmaindal-patch-1\n* [x] #2674 from lowjoel/postgres-create-database -> https://github.com/spotify/backstage/pull/2724/commits/effd328de7d125d8aad246db38f5a5aa0601f004\n* [x] #2695 from SDA-SE/jenkins-docs\n* [x] #2687 from spotify/rugvip/nightly\n* [x] #2689 from spotify/dependabot/npm_and_yarn/rollup-plugin-typescript2-0.27.3 -> https://github.com/spotify/backstage/pull/2724/commits/e38d40ee302c52b8ae776410bc60ed1355715896\n* [x] #2692 from spotify/alund/marketplace/gcp\n* [x] #2686 from Marvin9/fix/scaffolder-ssr -> https://github.com/spotify/backstage/pull/2724/commits/67fcced5854d688d5b76a5d93b38b5939e442042\n* [x] #2690 from dufcrule/patch-1\n* [x] #2683 from spotify/update-roadmap\n* [x] #2681 from spotify/freben/test-org\n* [x] #2678 from SDA-SE/feat/gauge-storybook\n* [x] #2677 from spotify/orkohunter/docker-image-publishing\n* [x] #2669 from spotify/freben/githu", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "backstage", + "incubating", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "backstage" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/backstage/backstage/pull/2637", + "sourceRepo": "backstage/backstage", + "reactions": 17, + "comments": 3 + }, + "security": { + "scannedAt": "2026-02-27T17:45:45.326Z", + "scannerVersion": 
"cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/backstage/backstage-30499-fix-techdocs-expandablenavigation-addon-does-not-work-on-firefox.json b/solutions/cncf-generated/backstage/backstage-30499-fix-techdocs-expandablenavigation-addon-does-not-work-on-firefox.json new file mode 100644 index 00000000..6a2136fe --- /dev/null +++ b/solutions/cncf-generated/backstage/backstage-30499-fix-techdocs-expandablenavigation-addon-does-not-work-on-firefox.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:57.394Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "backstage: fix(techdocs): ExpandableNavigation addon does not work on Firefox", + "description": "## Hey, I just made a Pull Request!\n\nFix #30332\n\nAfter enabling the `ExpandableNavigation`, one of our users reported that he wasn't able to collapse/open individual navigation items: only the top level collapse/open button was working, which is a clear regression from the default Mkdocs/Techdocs experience.\n\nThe issue only appears on Firefox, with no error in the browser side. It works properly on Chrome and Safari.\n\nThis can currently be tested on the demo website: https://demo.backstage.io/docs/default/component/backstage-demo\n\"image\"\n\nThe fix has been tested on Chrome, Firefox and Safari. 
Tests don't need to be updated.\n\nDemo on Firefox:\n\nhttps://github.com/user-attachments/assets/225ea7f8-2976-4b56-95f9-20f06a0fc63c\n\nKeyboard usage from #29960 is still working on all browsers.\n\n#### :heavy_check_mark: Checklist\n\n- [x] A changeset describing the change and affe", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Would fix #30332", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "backstage", + "incubating", + "app-definition", + "area-techdocs" + ], + "category": "workloads", + "cncfProjects": [ + "backstage" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/backstage/backstage/pull/30499", + "sourceRepo": "backstage/backstage", + "reactions": 9, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:45:57.394Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/backstage/backstage-31225-feat-home-add-new-frontend-system-support.json b/solutions/cncf-generated/backstage/backstage-31225-feat-home-add-new-frontend-system-support.json new file mode 100644 index 00000000..0d85fc2b --- /dev/null +++ b/solutions/cncf-generated/backstage/backstage-31225-feat-home-add-new-frontend-system-support.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:56.253Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "backstage: feat(home): add new frontend system support", + "description": "## Hey, I just made a Pull Request!\n\nMigrates home plugin to support the new frontend system architecture by introducing extension blueprints for composable homepage functionality.\n\nKey changes:\n- Add CustomHomepageWidgetBlueprint for creating installable homepage widgets\n- Add CustomHomepageBlueprint for 
composing pages from widget extensions\n- Introduce titleExtensionDataRef for NFS title handling\n- Visit Tracking enabled via extensions and still off by default -- can be enabled via config file\n\nThis attempts to bring the home plugin up to par with other core plugins that have migrated to the new frontend system\nFixes #31173\n\n#### :heavy_check_mark: Checklist\n\n- [x] A changeset describing the change and affected packages. ([more info](https://github.com/backstage/backstage/blob/master/CONTRIBUTING.md#creating-changesets))\n- [x] Added or updated documentation\n- [x] Tests for new functionality and regression tests for bug fixes\n- [ ] Screenshots attached (for UI changes)\n- [x] All you", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## Hey, I just made a Pull Request!\n\nBeen seeing the need for this in a couple of places. Previously for the evolution of nav items, and most recently https://github.com/backstage/backstage/pull/31055#discussion_r2350206387. Thinking it's worth adding.\n\n#### :heavy_check_mark: Checklist\n\n- [x] A changeset describing the change and affected packages. ([more info](https://github.com/backstage/backstage/blob/master/CONTRIBUTING.md#creating-changesets))\n- [x] Added or updated documentation\n- [ ] Tests for new functionality and regression tests for bug fixes\n- [ ] Screenshots attached (for UI changes)\n- [x] All your commits have a `Signed-off-by` line in the message. 
([more info](https://github.com/backstage/backstage/blob/master/CONTRIBUTING.md#developer-certificate-of-origin))", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "backstage", + "incubating", + "app-definition", + "area-documentation", + "area-home", + "area-search", + "size-large", + "waiting-for-review" + ], + "category": "workloads", + "cncfProjects": [ + "backstage" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/backstage/backstage/pull/31225", + "sourceRepo": "backstage/backstage", + "reactions": 10, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:45:56.253Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/backstage/backstage-391-add-feature-flags-api.json b/solutions/cncf-generated/backstage/backstage-391-add-feature-flags-api.json new file mode 100644 index 00000000..fbc09597 --- /dev/null +++ b/solutions/cncf-generated/backstage/backstage-391-add-feature-flags-api.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:51.577Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "backstage: Add Feature Flags API 🎚", + "description": "## Hey, I just made a Pull Request!\n\nCloses #358 \n\nThis introduces a Feature Flags API which is partially backwards compatible (with Spotify's internal deployment of Backstage) and forwards compatible. Similarly, it will enable a `register` method for the Plugin Registry API. 
It has settled on being a lightweight API that is independent of React and Backstage, and opens up for later being able to create React and Backstage specific layers later.\n\n### Defining a feature flag\n\n```tsx\nimport { createPlugin } from '@backstage/core';\nimport WelcomePage from './components/WelcomePage';\n\nexport default createPlugin({\n id: 'welcome',\n register({ router, featureFlags }) {\n router.registerRoute('/', WelcomePage);\n\n featureFlags.register('enable-welcome-box');\n },\n});\n\n```\n\n### Using a feature flag\n\n```tsx\nimport React, { FC } from 'react';\nimport { Button } from '@material-ui/core';\nimport { FeatureFlagState, featureFlagsApiRef, useApi } from '@backstage/core';\n\nconst ExampleButton: FC<", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Couple nits, but approvable anyway", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "### Using a feature flag", + "### Reading all registered feature flags" + ] + } + }, + "metadata": { + "tags": [ + "backstage", + "incubating", + "app-definition", + "type-suggestion", + "area-core" + ], + "category": "workloads", + "cncfProjects": [ + "backstage" + ], + "targetResourceKinds": [ + "Deployment" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/backstage/backstage/pull/391", + "sourceRepo": "backstage/backstage", + "reactions": 12, + "comments": 2 + }, + "security": { + "scannedAt": "2026-02-27T17:45:51.577Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/backstage/backstage-4271-rework-the-user-flow-of-the-catalog-import-plugin.json b/solutions/cncf-generated/backstage/backstage-4271-rework-the-user-flow-of-the-catalog-import-plugin.json new file mode 100644 index 00000000..c3e10237 --- /dev/null +++ 
b/solutions/cncf-generated/backstage/backstage-4271-rework-the-user-flow-of-the-catalog-import-plugin.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:46.849Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "backstage: Rework the user flow of the catalog-import plugin", + "description": "Closes #3971\nCloses #3678\nCloses #3591\nRelates to #3811 & #4153\n\nThis lead to quite a big changeset, I hope it will not be too hard to review...\n\nThe catalog import plugin supports several use cases (import a single file, discover files in a repo, offer to do a PR) but the UX isn't very nice atm. This is an attempt to come up with a nicer user flow and a more extensible plugin.\n\nThe plugin is now based on a configurable state-machine (see `useImportState`, `ImportStepper` and `ImportStepper/defaults.tsx`). It supports different flows (=`single-location`, `multiple-locations`, `no-locations`) that each use a slightly different step configuration (there are always four atm: `analyze`->`prepare`->`review`->`finish`). This makes it easy to configure it for different import-use cases.\n\nThese are mainly UI/UX changes while the existing \"logic\" was left intact. So most of the features like the discovery of all catalog files in a repository or the creation of pull requests still **only work in", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## Hey, I just made a Pull Request!\n\nCloses #4283 \nThis is to support #4283 on how we can achieve controlling the integration for catalog-import by using props.\n\nThe current code for catalog-import has only special-cases for GitHub integration (allows both PR + import from file) and my use case is to turn on/off the PR option. 
I had changed to have a global feature-flag for the implementations.\n\nThanks for your time 🎉 \n\nExample screenshot with GitHub integration that has enabled only the import from file option 👇 \n\n\"Screenshot\n\nI had verified that it works with and without the config set.\n\n#### :heavy_check_mark: Checklist\n\n- [x] A changeset describing the change and affected packages. ([more info](https://github.com/backstage/backstage/blob/master/CONTRIBUTING.md#creating-changesets))\n- [ ] Added or updated documentation\n- [ ] Tests for new functionality and regression tests for bug fixes\n- [ ] Screenshots attached (for UI changes)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "backstage", + "incubating", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "backstage" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/backstage/backstage/pull/4271", + "sourceRepo": "backstage/backstage", + "reactions": 16, + "comments": 16 + }, + "security": { + "scannedAt": "2026-02-27T17:45:46.850Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/backstage/backstage-5927-search-initial-alpha-release.json b/solutions/cncf-generated/backstage/backstage-5927-search-initial-alpha-release.json new file mode 100644 index 00000000..914230f1 --- /dev/null +++ b/solutions/cncf-generated/backstage/backstage-5927-search-initial-alpha-release.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:50.536Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "backstage: [Search] Initial, Alpha Release", + "description": "## Hey, I just made a Pull Request!\n\nWith this PR, we are formally releasing the new Backstage Search Platform, including:\n\n- A highly 
extensible search backend, responsible for document indexing and query handling\n- A no-config-needed, out-of-the-box, in-memory search engine (with an API in place to support other search engines)\n- Support for indexing the contents of the software catalog (with an API in place to index other types of content)\n- A simple scheduler for refreshing the search index as the software catalog (or other content) changes over time\n- A highly composable frontend for tailoring the search experience for your organization's needs\n\ncloses #5716 \n\n#### :heavy_check_mark: Checklist\n\n- [ ] A changeset describing the change and affected packages. ([more info](https://github.com/backstage/backstage/blob/master/CONTRIBUTING.md#creating-changesets))\n- [ ] Added or updated documentation\n- [ ] Tests for new functionality and regression tests for bug fixes\n- [ ] Screenshots at", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "And @adamdmharvey...\n\n> _...Do the same changes need to be made to the CLI create-app templates as well so newly created apps use Search Pre-Alpha out of the gate?_\n\nAbsolutely! I was starting to go down that path when I realized it's not really possible to include a change to `create-app` that relies on API changes that haven't been released yet (because part of the e2e test run is creating, installing, and running via `create-app`. 
So that'll be a follow-up PR, for sure.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "backstage", + "incubating", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "backstage" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/backstage/backstage/pull/5927", + "sourceRepo": "backstage/backstage", + "reactions": 12, + "comments": 8 + }, + "security": { + "scannedAt": "2026-02-27T17:45:50.536Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/backstage/backstage-6375-implement-defaulttechdocscollator.json b/solutions/cncf-generated/backstage/backstage-6375-implement-defaulttechdocscollator.json new file mode 100644 index 00000000..2a3f5b1a --- /dev/null +++ b/solutions/cncf-generated/backstage/backstage-6375-implement-defaulttechdocscollator.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:48.054Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "backstage: Implement DefaultTechDocsCollator", + "description": "## Hey, I just made a Pull Request!\n\nRef: https://github.com/backstage/backstage/issues/4568\n* Implements a collator for tech docs.\n * Retrieves mkdocs created search index for entities that have documentation configured\n* Registers collator to expose tech docs content to be searchable\n* Modifies example search to contain tech docs\n * Displays docs results with link to docs and the entity name as title.\n* Adds pagination to example search\n* Creates a reusable type filter to be located in the search package.\n\nScreenshot:\n![image](https://user-images.githubusercontent.com/2392775/125741991-7a35017f-6f04-4837-9003-8c86f2c667da.png)\n\nThis unblocks: https://github.com/backstage/backstage/issues/4781\n\n#### 
:heavy_check_mark: Checklist\n\n- [x] A changeset describing the change and affected packages. ([more info](https://github.com/backstage/backstage/blob/master/CONTRIBUTING.md#creating-changesets))\n- [ ] Added or updated documentation\n- [x] Tests for new functionality and regression tests ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "If we want to include all searchables on that single search screen (which I assume we do right?) we could probably create a new `DocumentationResultListItem` component to display these documentation searches as a one clump per entity. Mkdocs search_index includes some duplication on texts that have subtitles (parent item contains texts from subitems).", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "backstage", + "incubating", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "backstage" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/backstage/backstage/pull/6375", + "sourceRepo": "backstage/backstage", + "reactions": 14, + "comments": 9 + }, + "security": { + "scannedAt": "2026-02-27T17:45:48.054Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/backstage/backstage-6651-implement-in-context-search-bar-for-tech-docs.json b/solutions/cncf-generated/backstage/backstage-6651-implement-in-context-search-bar-for-tech-docs.json new file mode 100644 index 00000000..0e0b375e --- /dev/null +++ b/solutions/cncf-generated/backstage/backstage-6651-implement-in-context-search-bar-for-tech-docs.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:52.645Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "backstage: Implement in-context search bar for tech docs.", + 
"description": "## Hey, I just made a Pull Request!\n\n* Add in-context search functionality to tech docs\n* Use the existing backend search functionality with tech docs specific filters\n* Use material-ui autocomplete to display 10 first search results\n* Add tests\n\nhttps://user-images.githubusercontent.com/2392775/127490500-bfeddbf1-4f6c-4df6-8931-85b4021f30bc.mp4\n \n#### :heavy_check_mark: Checklist\n\n- [x] A changeset describing the change and affected packages. ([more info](https://github.com/backstage/backstage/blob/master/CONTRIBUTING.md#creating-changesets))\n- [n/a] Added or updated documentation\n- [x] Tests for new functionality and regression tests for bug fixes\n- [x] Screenshots attached (for UI changes)\n- [x] All your commits have a `Signed-off-by` line in the message. ([more info](https://github.com/backstage/backstage/blob/master/CONTRIBUTING.md#developer-certificate-of-origin))", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Hey, what's the status of this one? 
@iamEAP are we about to give this another look?\n\n@Xantier Seems like you'll need to regenerate the api report.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "backstage", + "incubating", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "backstage" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/backstage/backstage/pull/6651", + "sourceRepo": "backstage/backstage", + "reactions": 11, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:45:52.645Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/backstage/backstage-8252-add-swimlanes-to-scaffolder-page.json b/solutions/cncf-generated/backstage/backstage-8252-add-swimlanes-to-scaffolder-page.json new file mode 100644 index 00000000..de811941 --- /dev/null +++ b/solutions/cncf-generated/backstage/backstage-8252-add-swimlanes-to-scaffolder-page.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:49.166Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "backstage: Add swimlanes to scaffolder page", + "description": "## Hey, I just made a Pull Request!\n\nAdd swimlane filtering to the scaffolder page. This will allow\ndevelopers to surface specific templates ahead of others or group\ntemplates together. This is just a first pass, in the future we can also\nall developers to customize the listing component for fancier one\n\nFixes https://github.com/backstage/backstage/issues/6661\n\nSigned-off-by: jrusso1020 \n\n#### :heavy_check_mark: Checklist\n\n- [x] A changeset describing the change and affected packages. 
([more info](https://github.com/backstage/backstage/blob/master/CONTRIBUTING.md#creating-changesets))\n- [ ] Added or updated documentation\n- [ ] Tests for new functionality and regression tests for bug fixes\n- [x] Screenshots attached (for UI changes)\n- [x] All your commits have a `Signed-off-by` line in the message. ([more info](https://github.com/backstage/backstage/blob/master/CONTRIBUTING.md#developer-certificate-of-origin))\n\n## Implemented example\n\"grouped-templ", {\r\n const filtered = extraSwimlanes.map(swimlane => swimlane.filter(entity))\r\n return filtered.some(true)\r\n }\r\n}\r\n\r\n...\r\n{extraSwimlanes &&\r\n extraSwimlanes.map(swimlane => (\r\n \r\n ))}\r\n \r\n }" + ] + } + }, + "metadata": { + "tags": [ + "backstage", + "incubating", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "backstage" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/backstage/backstage/pull/8252", + "sourceRepo": "backstage/backstage", + "reactions": 14, + "comments": 6 + }, + "security": { + "scannedAt": "2026-02-27T17:45:49.166Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/bank-vaults/bank-vaults-1613-operator-filter-vault-operator-cache-to-reduce-memory-usage.json b/solutions/cncf-generated/bank-vaults/bank-vaults-1613-operator-filter-vault-operator-cache-to-reduce-memory-usage.json new file mode 100644 index 00000000..7e41838e --- /dev/null +++ b/solutions/cncf-generated/bank-vaults/bank-vaults-1613-operator-filter-vault-operator-cache-to-reduce-memory-usage.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:57.977Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "bank-vaults: operator: Filter vault-operator cache to reduce memory usage", + "description": "| Q | A\n| --------------- | ---\n| Bug fix? 
| yes\n| New feature? | no\n| API breaks? | no\n| Deprecations? | no\n| Related tickets | fixes #1288 \n| License | Apache 2.0\n\n### What's in this PR?\nUpgrade controller-runtime to latest release (v0.11.2) and filter cache to only target those resources created by the controller/labeled with common keys. `app.kubernetes.io/name in (vault, vault-configurator)`\n\n### Why?\nIn clusters with large number of configmaps, vault-operator memory usage is very high (~2Gi for 60k configmaps). While no cluster should have that many, I think we don't need to cache all of those.\n\n### Additional context\nIn previous conditions, using v1.15.2, this is the memory usage ~1m after launch:\n\n```\n# k top pods -n keos-core vault-operator-86b8bf4596-gqpgr\nNAME CPU(cores) MEMORY(bytes) \nvault-operator-86b8bf4596-gqpgr 85m 2091Mi\n```\n\nThis is the memory usage with the changes included in this PR ", + "type": "analyze", + "status": "completed", + "resolution": { + "summary": "| Q | A\n| --------------- | ---\n| Bug fix? | yes\n| New feature? | no\n| API breaks? | no\n| Deprecations? 
| no\n| Related tickets | fixes #1581\n| License | Apache 2.0\n\n### What's in this PR?\nConvert `map[interface{}]interface{}` to `map[string]interface{}` before sending the config to Vault API.\nThat's because the config data can have a sub dict (like `provider_config` in JWT/OIDC).\n\nWithout this conversion, Vault API will retrun the following error:\n```\njson: unsupported type: map[interface {}]interface {}\n```\n\n### Why?\nThis issue has been already fixed in #1247 however, it was removed (unintentionally) in the refactoring of v1.5 which caused #1581.\n\n### Additional context\n\n### Checklist\n\n- [x] Code meets the [Developer Guide](https://github.com/banzaicloud/developer-guide)\n- [x] User guide and development docs updated (if needed)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "# k top pods -n keos-core vault-operator-86b8bf4596-gqpgr\r\nNAME CPU(cores) MEMORY(bytes) \r\nvault-operator-86b8bf4596-gqpgr 85m 2091Mi", + "# k top pods -n keos-core vault-operator-65d6c98759-wvbwp \r\nNAME CPU(cores) MEMORY(bytes) \r\nvault-operator-65d6c98759-wvbwp 2m 33Mi", + "json: unsupported type: map[interface {}]interface {}" + ] + } + }, + "metadata": { + "tags": [ + "bank-vaults", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "bank-vaults" + ], + "targetResourceKinds": [ + "Pod", + "Configmap" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/bank-vaults/bank-vaults/pull/1613", + "sourceRepo": "bank-vaults/bank-vaults", + "reactions": 4, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:47:57.977Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/buildpacks/buildpacks-1921-added-targets-flag-for-buildpack-new-cli.json b/solutions/cncf-generated/buildpacks/buildpacks-1921-added-targets-flag-for-buildpack-new-cli.json new file 
mode 100644 index 00000000..dcba0c7a --- /dev/null +++ b/solutions/cncf-generated/buildpacks/buildpacks-1921-added-targets-flag-for-buildpack-new-cli.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:04.133Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "buildpacks: added targets flag for buildpack new cli", + "description": "## Summary\n\n- added new flag `--targets` to `pack buildpack new` with shorthand `-t`\n- now this new flag `--targets` will take args in format [os][/arch][/archvariant]:[distroName@version@another-version];[another-distro@v1@v2]\n- mark `--stacks` flag deprecated\n- removed the default value of stacks, so `id` of stacks no more exists in file `buildpack.toml`\n- remove `[[stacks]]` from `buildpack.toml` file\n- will add a default `--target` of current device when `target` flag is not specified \n- will show an error on unknown target is added from cli (only for [os] [arch] [archVariant]) but still adds those targets to final output of `buildpack.toml` file\n\n## Output\n\n#### Before\n` pack buildpack new test`\n```toml\napi = \"0.8\"\nWithWindowsBuild = false\nWithLinuxBuild = false\n\n[buildpack]\n id = \"test\"\n version = \"1.0.0\"\n\n[[stacks]]\n id = \"io.buildpacks.stacks.jammy\"\n\n```\n\n#### After\n`pack buildpack new test -t \"linux/arm/v6\" -t \"linux/amd64\" -t \"windows/amd64:windows-nano@10.0.19041.1415\" -t", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "> @jjbustamante i am unsure why the tests are failing & i tried to run tests on main branch & undoing all changes i made in my current branch, but still getting same result :( are these tests are failing due to my changes ?\n\nLet me check, I tried yesterday to check the logs but I was not able to.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "#### 
After\r\n`pack buildpack new test -t \"linux/arm/v6\" -t \"linux/amd64\" -t \"windows/amd64:windows-nano@10.0.19041.1415\" -t \"linux/arm/v6:ubuntu@14.04\" -t \"linux/arm/v6:ubuntu@16.04\" -t \"linux/arm/v6:ubuntu@16.01@16.02;debian@10.10@8.06\"`", + "## Documentation\r\n\r\n\r\n\r\n- Should this change be documented?\r\n - [x] Yes, see [# doc for `--targets` flag](https://github.com/WYGIN/buildpacks-pack/blob/90c2195d1493590e106a4403b9d2b15f7b73bd26/internal/commands/buildpack_new.go#L130)\r\n - [ ] No\r\n\r\n## Related\r\n\r\n\r\nResolves #1918\n\n@jjbustamante i am unsure why the tests are failing & i tried to run tests on main branch & undoing all changes i made in my current branch, but still getting same result :( are these tests are failing due to my changes ?\n> @jjbustamante i am unsure why the tests are failing & i tried to run tests on main branch & undoing all changes i made in my current branch, but still getting same result :( are these tests are failing due to my changes ?\r\n\r\nLet me check, I tried yesterday to check the logs but I was not able to. \nYes, You are getting an error like this:\r\n\r\n\"Screenshot", + "It seems to be because of the new argument, the `Got` expectation doesn't know about the new platform values. I think you may need to re-create the Mock. 
\nI think I know what is going on, if you take a look at your change in [internal/commands/buildpack_new_test.go](https://github.com/buildpacks/pack/pull/1921/files#diff-f51bc37c8d2ae5443008386b25498b4ea07f3d3e6eff94edbe55bcd685edec01) you said:\r\n\r\n- I am expecting the pack client to be called with these arguments, and you added the `Targets` stuff\r\n\r\nBut in the line 78" + ] + } + }, + "metadata": { + "tags": [ + "buildpacks", + "incubating", + "app-definition", + "type-enhancement" + ], + "category": "workloads", + "cncfProjects": [ + "buildpacks" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/buildpacks/pack/pull/1921", + "sourceRepo": "buildpacks/pack", + "reactions": 3, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:46:04.133Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/buildpacks/buildpacks-2086-implementation-of-the-multi-platform-support-for-builders-and-bu.json b/solutions/cncf-generated/buildpacks/buildpacks-2086-implementation-of-the-multi-platform-support-for-builders-and-bu.json new file mode 100644 index 00000000..168cc4d0 --- /dev/null +++ b/solutions/cncf-generated/buildpacks/buildpacks-2086-implementation-of-the-multi-platform-support-for-builders-and-bu.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:05.885Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "buildpacks: Implementation of the multi-platform support for builders and buildpack packages RFC 0128", + "description": "## Summary\nThe purpose of this PR is to implement the RFC [0128](https://github.com/buildpacks/rfcs/blob/main/text/0128-multiarch-builders-and-package.md). 
\n\nIt adds the capability to the `pack buildpack package` and `pack builder create` to generate multi-platform OCI images and create an image index to combine them.\n\nA draft version of this PR was demo during KubeCon EU 24. See the recording [here](https://youtu.be/cenTw6WzQv8?si=_vi0P4aJujT6pZSS)\n\n## Output\n\n#### Before\n\n- We were not able to create multi-platform buildpacks and builders \n\n#### After\n\n- Follow the RFC documentation to specify `Targets` entries in the `buildpack.toml` or `package.toml` files.\n\nCheck the document mentioned above, it has a detailed output\n\n## Documentation\n\n- Should this change be documented?\n - [X] Yes, see \n - [ ] No\n\n## Related\n\nResolves #1459\nResolves #2079", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This PR updates the pipeline builder create-package workflow automation to support multi-arch builds of this buildpack (ARM64 and AMD64). **The ARM64 image should be considered experimental for now**, since the official [Paketo RFC](https://github.com/paketo-buildpacks/rfcs/pull/288) and [CNB RFCs](https://github.com/buildpacks/rfcs/pull/295) related to multi-arch have not been merged in yet.\nIt uses an **experimental version of the pack CLI** (https://github.com/buildpacks/pack/pull/2086), and is based off of the upstream CNB RFC that is currently IN PROGRESS. 
This workflow will definitely be subject to change when an official `pack` release comes out, and if there are any changes to the upstream RFC.\nOnce we have merged this and seen it work on `main`, we will contribute it into the upstream pipeline-builder repository.\n(@sophiewigmore )\n\n## Checklist\n\n* [ ] I have viewed, signed, and submitted the Contributor License Agreement.\n* [ ] I have linked issue(s) that this PR should close using keywords or the Github UI (See [docs](https://docs.github.com/en/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue))\n* [ ] I have added an integration test, if necessary.\n* [ ] I have reviewed the [styleguide](https://github.com/paketo-buildpacks/community/blob/main/STYLEGUIDE.md) for guidance on my code quality.\n* [ ] I'm happy with the commit history on this PR (I have rebased/squashed as needed).", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "| [Flag](https://app.codecov.io/gh/buildpacks/pack/pull/2086/flags?src=pr&el=flags&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=buildpacks) | Coverage Δ | |\n|---|---|---|\n| [os_linux](https://app.codecov.io/gh/buildpacks/pack/pull/2086/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=buildpacks) | `74.93% <21.32%> (-3.69%)` | :arrow_down: |\n| [os_macos](https://app.codecov.io/gh/buildpacks/pack/pull/2086/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=buildpacks) | `72.72% <20.28%> (-3.58%)` | :arrow_down: |\n| [os_windows](https://app.codecov.io/gh/buildpacks/pack/pull/2086/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=buildpacks) | `75.40% <21.32%> (-3.68%)` | :arrow_down: |\n| 
[unit](https://app.codecov.io/gh/buildpacks/pack/pull/2086/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=buildpacks) | `75.98% <21.32%> (-3.71%)` | :arrow_down: |\n\nFlags with carried forward coverage won't be shown. [Click here](https://docs.codecov.io/docs/carryforward-flags?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=buildpacks#carryforward-flags-in-the-pull-request-comment) to find out more.\n\n
\nHello 👋\r\nWe have been testing the latest version", + "from \r\nhttps://github.com/buildpacks/pack/actions/runs/8118576298\r\n\r\nWe had success running, from the ~/buildpack directory, using:", + "with a layout similar to this:" + ] + } + }, + "metadata": { + "tags": [ + "buildpacks", + "incubating", + "app-definition", + "type-enhancement", + "type-chore" + ], + "category": "workloads", + "cncfProjects": [ + "buildpacks" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/buildpacks/pack/pull/2086", + "sourceRepo": "buildpacks/pack", + "reactions": 2, + "comments": 16 + }, + "security": { + "scannedAt": "2026-02-27T17:46:05.885Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/buildpacks/buildpacks-2107-release-noble-numbat.json b/solutions/cncf-generated/buildpacks/buildpacks-2107-release-noble-numbat.json new file mode 100644 index 00000000..c83c9e32 --- /dev/null +++ b/solutions/cncf-generated/buildpacks/buildpacks-2107-release-noble-numbat.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:01.715Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "buildpacks: Release Noble Numbat", + "description": "Ubunut has pushed out a docker image for [Noble](https://hub.docker.com/_/ubuntu). 
There is not a pack cli in [buildpacks launchpad](https://ppa.launchpadcontent.net/cncf-buildpacks/pack-cli/ubuntu/dists/) for Noble\n\n## Summary\n\n- Update Github workflows to release a package for Ubuntu 22.04 (Noble Numbat)\n\n## Output\n\nUpload pack to the buildpacks PPA launchpad for Noble\n\n#### Before\n\n- No package existed for Noble\n\n#### After\n\n- Package exists for Noble\n\n## Documentation\n\n- Should this change be documented?\n - [ ] Yes, see #___\n - [x ] No\n\n## Related\n\nResolves #2152", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "We are going to need this soon also. This is great.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "buildpacks", + "incubating", + "app-definition", + "type-chore" + ], + "category": "workloads", + "cncfProjects": [ + "buildpacks" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/buildpacks/pack/pull/2107", + "sourceRepo": "buildpacks/pack", + "reactions": 12, + "comments": 3 + }, + "security": { + "scannedAt": "2026-02-27T17:46:01.715Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/buildpacks/buildpacks-2123-fix-pack-buildpack-new-targets.json b/solutions/cncf-generated/buildpacks/buildpacks-2123-fix-pack-buildpack-new-targets.json new file mode 100644 index 00000000..ce9bf1c2 --- /dev/null +++ b/solutions/cncf-generated/buildpacks/buildpacks-2123-fix-pack-buildpack-new-targets.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:03.230Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "buildpacks: Fix pack buildpack new --targets", + "description": "## Summary\n\n## Output\n\n#### Before\n\n#### After\n\n## Documentation\n\n- Should this change be 
documented?\n - [ ] Yes, see #___\n - [ ] No\n\n## Related\n\nResolves #2120\nResolves #2121", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## Summary\n\nProof of concept for [LFX mentorship: BuildKit explore](https://mentorship.lfx.linuxfoundation.org/project/2c5ced86-d23b-41f5-aec3-59730e29f092)\n\n## Output\n\n#### Before\n\n#### After\n\n## Documentation\n\n- Should this change be documented?\n - [ ] Yes, see #___\n - [ ] No\n\n## Related\n\nResolves #___", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "| [Flag](https://app.codecov.io/gh/buildpacks/pack/pull/2123/flags?src=pr&el=flags&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=buildpacks) | Coverage Δ | |\n|---|---|---|\n| [os_linux](https://app.codecov.io/gh/buildpacks/pack/pull/2123/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=buildpacks) | `69.32% <85.42%> (+0.01%)` | :arrow_up: |\n| [os_macos](https://app.codecov.io/gh/buildpacks/pack/pull/2123/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=buildpacks) | `65.90% <85.42%> (+0.02%)` | :arrow_up: |\n| [os_windows](https://app.codecov.io/gh/buildpacks/pack/pull/2123/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=buildpacks) | `69.73% <85.42%> (+0.02%)` | :arrow_up: |\n| [unit](https://app.codecov.io/gh/buildpacks/pack/pull/2123/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=buildpacks) | `70.21% <85.42%> (+0.02%)` | :arrow_up: |\n\nFlags with carried forward coverage won't be shown. 
[Click here](https://docs.codecov.io/docs/carryforward-flags?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=buildpacks#carryforward-flags-in-the-pull-request-comment) to find out more.\n\n
\nI did the following test, I tried to package the Buildpack generated. \r\n\r\nUsing the branch binary everything is fine, as expected", + "And then, I tried to use the current `pack 0.33.2` version", + "As expected an error must be thrown because we changed the toml schema, my only concern is, should we add a helpful message for end-users to point them out to update:\r\n- `distributions` -> `distros`\r\n- `versions` -> `version` (single value instead of an array) \r\n\r\nThis could happen if we try to execute the new pack version, containing this fix, with old toml schema \nSorry about all the messages. \r\n\r\nThe new pack version containing this feature is not going to fail with a buildpack using the previous schema, so things could change for end-users and they wouldn't notice\r\n\r\nThese examples uses the binary for this branch and package the same buildpack with the previous and the new schema" + ] + } + }, + "metadata": { + "tags": [ + "buildpacks", + "incubating", + "app-definition", + "type-enhancement" + ], + "category": "workloads", + "cncfProjects": [ + "buildpacks" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/buildpacks/pack/pull/2123", + "sourceRepo": "buildpacks/pack", + "reactions": 3, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:46:03.230Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cadence-workflow/cadence-workflow-2537-wip-add-support-for-postgres-persistence.json b/solutions/cncf-generated/cadence-workflow/cadence-workflow-2537-wip-add-support-for-postgres-persistence.json new file mode 100644 index 00000000..19591237 --- /dev/null +++ b/solutions/cncf-generated/cadence-workflow/cadence-workflow-2537-wip-add-support-for-postgres-persistence.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:40.000Z", + "exportedBy": "cncf-mission-generator", + 
"consoleVersion": "auto-generated", + "mission": { + "title": "cadence-workflow: [WIP] Add support for postgres persistence", + "description": "I did the first steps for adding postgres support.\nFixes https://github.com/uber/cadence/issues/2536 and https://github.com/uber/cadence/issues/1834\n\n- [x] Add basic support for postgres\n- [x] Migrate the schema files\n- [ ] Add postgres tests\n- [ ] Fix postgres tests", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "[![CLA assistant check](https://cla-assistant.io/pull/badge/signed)](https://cla-assistant.io/uber/cadence?pullRequest=2537)
All committers have signed the CLA.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "--- FAIL: TestPostgresConnTestSuite (0.01s)\r\n\r\n --- PASS: TestPostgresConnTestSuite/TestParseCQLFile (0.00s)\r\n\r\n --- FAIL: TestPostgresConnTestSuite/TestSQLConn (0.00s)\r\n\r\n require.go:765: \r\n\r\n \tError Trace:\tconn_test.go:127\r\n\r\n \tError: \tExpected nil, but got: &errors.errorString{s:\"unsupported database driver: postgres\"}\r\n\r\n \tTest: \tTestPostgresConnTestSuite/TestSQLConn", + "# github.com/uber/cadence/common/persistence/sql/storage/mysql\r\n--\r\n  | common/persistence/sql/storage/mysql/db.go:36:5: cannot use (*DB)(nil) (type *DB) as type sqldb.Tx in assignment:\r\n  | *DB does not implement sqldb.Tx (missing DeleteFromEvents method)\r\n  | common/persistence/sql/storage/mysql/db.go:37:5: cannot use (*DB)(nil) (type *DB) as type sqldb.Interface in assignment:\r\n  | *DB does not implement sqldb.Interface (missing DeleteFromEvents method)\r\n  | common/persistence/sql/storage/mysql/db.go:57:14: cannot use NewDB(mdb.db, xtx) (type *DB) as type sqldb.Tx in return argument:\r\n  | *DB does not implement sqldb.Tx (missing DeleteFromEvents method)\r\n  | # github.com/uber/cadence/common/persistence/sql/storage/postgres\r\n  | common/persistence/sql/storage/postgres/db.go:36:5: cannot use (*DB)(nil) (type *DB) as type sqldb.Tx in assignment:\r\n  | *DB does not implement sqldb.Tx (missing DeleteFromEvents method)\r\n  | common/persistence/sql/storage/postgres/db.go:37:5: cannot use (*DB)(nil) (type *DB) as type sqldb.Interface in assignment:\r\n  | *DB does not implement sqldb.Interface (missing DeleteFromEvents method)\r\n  | common/persistence/sql/storage/postgres/db.go:57:14: cannot use NewDB(mdb.db, xtx) (type *DB) as type sqldb.Tx in return argument:\r\n  | *DB does not implement sqldb.Tx (missing DeleteFromEvents method)\r\n  | Makefile:118: recipe for target 
'cadence' failed", + "# github.com/uber/cadence/common/persistence/sql/storage/postgres\r\n--\r\n  | common/persistence/sql/storage/postgres/db.go:36:5: cannot use (*DB)(nil) (type *DB) as type sqldb.Tx in assignment:\r\n  | *DB does not implement sqldb.Tx (missing GetLastEnqueuedMessageIDForUpdate method)\r\n  | common/persistence/sql/storage/postgres/db.go:37:5: cannot use (*DB)(nil) (type *DB) as type sqldb.Interface in assignment:\r\n  | *DB does not implement sqldb.Interface (missing GetLastEnqueuedMessageIDForUpdate method)\r\n  | common/persistence/sql/storage/postgres/db.go:57:14: cannot use NewDB(mdb.db, xtx) (type *DB) as type sqldb.Tx in return argument:\r\n  | *DB does not implement sqldb.Tx (missing GetLastEnqueuedMessageIDForUpdate method)\r\n  | Makefile:118: recipe for target 'cadence' failed" + ] + } + }, + "metadata": { + "tags": [ + "cadence-workflow", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "cadence-workflow" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cadence-workflow/cadence/pull/2537", + "sourceRepo": "cadence-workflow/cadence", + "reactions": 6, + "comments": 28 + }, + "security": { + "scannedAt": "2026-02-27T17:46:40.000Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cartography/cartography-232-fix-use-regional-clients-for-s3-buckets.json b/solutions/cncf-generated/cartography/cartography-232-fix-use-regional-clients-for-s3-buckets.json new file mode 100644 index 00000000..917a3813 --- /dev/null +++ b/solutions/cncf-generated/cartography/cartography-232-fix-use-regional-clients-for-s3-buckets.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:44.510Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cartography: fix: use regional clients for s3 buckets", + 
"description": "Closes #231", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Hi there! I haven't had a chance to dig in deep yet, but is there any chance that this is related to this PR? https://github.com/lyft/cartography/pull/146", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "The output confirms, if you're using an opt-in region, you must have a matching client/server region for s3 APIs to work properly. If you'd like, I can modify the code to only use specific regional s3 clients if the regions are opt-in. \r\n\r\n![image](https://user-images.githubusercontent.com/1248221/73088534-74988c80-3ea2-11ea-8d15-86eb0d850228.png)\r\n\r\n\r\nEDIT: Forgot to add, I tested using the latest version of boto/botocore to be sure that's not the issue.\n@flyinbutrs Thanks for your detailed writeup!\r\nWe're first going to (omg finally) put #146 through internal testing early next week since it also deals with regions. Would you be able to test if that change fixes the `IllegalConstraint` error? I suspect it might not since it doesn't check for opt-in, but I'm still curious. \r\n\r\nAfter merging #146 I think we should do something to incorporate your \"opt-in logic\" in, as I don't think s3.py should call `describe-regions`. \r\n\r\nRather, once #146 is merged and if you experience the same exception, I think it would make more sense to put that opt-in logic in `cartography.intel.aws.sync_one_account()`.\r\n\r\nWhat do you think?\nYeah, I can checkout that branch and retry, no prob. The `opted-in` region concept is new. I suspect they're trying to minimize the number of resources that need to be replicated to new regions as they keep adding more every year.\nJust ran a test, same bug is present with #146 merged in. I can rebase mine on that branch and then repush. 
I think it will require a slightly different tweak to get the `ec2` calls out of `s3.py`, but should still work fine. \r\n\r\nDo you want me to create an s3 client per region, or just for the `opted-in` regions? I can do either way.\n@flyinbutrs Going to do some investigation and digging\n@flyinbutrs I'm going to try to summarize things. Admittedly I am confused, can you check if my understanding is correct? Here goes:\r\n\r\nSome newer AWS regions require your AWS account to be [opted](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions) [in](https://aws.amazon.com/blogs/security/setting-permissions-to-enable-accounts-for-upcoming-aws-regions/) to perform actions on them.\r\n\r\nWith boto3, you can point your HTTP calls to a specific region in the `client` object, the `session` object, `credentials` file, and `config` file. As seen in boto/boto3#781, you can define a `{CreateBucket/GetBucket/Whatever}Configuration` object to specify which region your s3 call will hit.\r\n\r\nCartography's s3 sync [does not specify region in the boto3 `client`, and it does not specify region in the boto3 `session`](https://github.com/lyft/cartography/blob/6803357bb8f693e8706bb329e52ffc9a3ee052e1/cartography/intel/aws/__init__.py#L22). Therefore, Cartography's s3 sync is probably relying on the `config` or the `credentials` files, which results in confusing behavior including the problem you found.\r\n\r\nI re-read your writeup and your code, but I'm not sure what you mean about a \"client\" versus \"server\". \r\n\r\nIn any case, after reading that boto3 issue, I think that the right solution would be to supply a `regions` object to the s3 sync (like in [these lines](https://github.com/lyft/cartography/blob/6803357bb8f693e8706bb329e52ffc9a3ee052e1/cartography/intel/aws/__init__.py#L37-L39)). 
If need be, we can limit the regions supplied to the s3 sync to only those that have been opted-in.\r\n\nclient = `boto3.client`\r\nserver = the AWS managed S3 endpoint\r\n\r\nWhile boto/boto3#781 is referring to the same issue, they're not actually giving the complete response. Based on my testing above, the opt-in regions require _any_ API calls made to a bucket in those regions to have the region encoded in the `client` or `session`, `config` or `credentials` files.\r\n\r\nI think the correct fix for this issue is as follows (and I'm happy to update the PR, though it may take a few days):\r\n\r\n1. pass in the `regions` object to the s3 sync.\r\n2. use region specific s3 clients for opted-in regions.\r\n\r\n\n@flyinbutrs\r\n\r\n> 1. pass in the regions object to the s3 sync.\r\n\r\nRight, and that would look like", + "(side note: we might want to dig in to way IAM doesn't need regions right now but one problem at a time lol)\r\n\r\nAlso I'm just sketching this out and it's untested and all that.\r\n\r\n> 2. use region specific s3 clients for opted-in regions.\r\n\r\nYeah I agree with this change too. 
We can change [ec2.get_ec2_regions()](https://github.com/lyft/cartography/blob/4ac824471cb13dd4d9dab5e23cf7a9ae9807d3d9/cartography/intel/aws/ec2.py#L22) to return only opted-in regions; maybe something like this (incorporating your code):", + "Then we would need to create s3 clients with the proper regions, which makes us need to change [s3.sync()](https://github.com/lyft/cartography/blob/4ac824471cb13dd4d9dab5e23cf7a9ae9807d3d9/cartography/intel/aws/s3.py#L318) like this:" + ] + } + }, + "metadata": { + "tags": [ + "cartography", + "sandbox", + "app-definition", + "stale" + ], + "category": "workloads", + "cncfProjects": [ + "cartography" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cartography-cncf/cartography/pull/232", + "sourceRepo": "cartography-cncf/cartography", + "reactions": 0, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:46:44.511Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cedar/cedar-123-resolve-issue-114-expose-evaluationerror.json b/solutions/cncf-generated/cedar/cedar-123-resolve-issue-114-expose-evaluationerror.json new file mode 100644 index 00000000..64fa065c --- /dev/null +++ b/solutions/cncf-generated/cedar/cedar-123-resolve-issue-114-expose-evaluationerror.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:49.455Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cedar: Resolve: Issue 114 Expose-EvaluationError", + "description": "closes #114 \n\n## Reviewed by Kesha Hietala (khieta)\n\nSo, after some digging, I found the EvaluationError enum in `cedar-policy-core/src/evaluator/err.rs`‎ which is imported by the `cedar/cedar-policy-core/src/evaluator.rs` on lines 25-27.\n\nI haven't tested this Idea but using that information and the TODO I arrived at a solution.\n> /// TODO in the future 
this can/should be the actual Core `EvaluationError\n\n[!] There's an unfinished comment on line 447 of api.rs I meant to say something like `[+] Modified to Resolve TODO `\n\n### Solution\n`cedar/cedar-policy/src/api.rs`\nImport the EvaluationError enum from `cedar-policy-core` and include it as a variant called CoreError in the ApiEvaluationError enum.\n\nThe #[from] attribute allows for automatic conversion from EvaluationError to ApiEvaluationError::CoreError.\n\nConsistent error handling experience leverages the error types and patterns defined in cedar-policy-core. \nThis ensures that any changes made to cedar-policy-core's EvaluationError ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## Description of changes\n\nThis PR adds the `AuthorizationError` type discussed in PR #123. I argue that this is a non-breaking change because errors returned by authorization were never truly exposed to users of `cedar-policy` -- they were (and still are) exposed only through a `String`. 
This PR slightly modifies the `String` that users will see for certain types of errors.\n\nIn particular, where before they may have seen a message like\n```\nwhile evaluating policy policy0, encountered the following error: type error: expected bool, got long\n```\nThey will now see\n```\nerror occurred while evaluating policy `policy0`: type error: expected bool, got long\n```\n\nThe reason for the large diff is that we rely heavily on this type of error message in our integration tests.\n\nI recommend that this PR be included in our next patch release.\n\n## Issue #, if available\n\nRelated to #114\n\n## Checklist for requesting a review\n\nThe change in this PR is (choose one, and delete the other options):\n\n- [x] A change \"invisible\" to users (e.g., documentation, changes to \"internal\" crates like `cedar-policy-core`, `cedar-validator`, etc.)\n\nI confirm that this PR (choose one, and delete the other options):\n\n- [x] Does not update the CHANGELOG because my change does not significantly impact released code.\n\nI confirm that [`cedar-spec`](https://github.com/cedar-policy/cedar-spec) (choose one, and delete the other options):\n\n- [x] Does not require updates because my change does not impact the Cedar Dafny mo", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "### Additional changes\r\n\r\nThere are 8 references to EvaluationError that would need to be changed.\r\nHere's an example using line 121-132 from cedar-policy/src/api.rs.", + "## Further changes\r\n`cedar-policy-core/src/authorizer.rs`\r\nThere are a few `Vec` calls that need to be updated.\r\nFor example, in the Diagnostics struct.\r\n- We need to update the Diagnostics struct to use `Vec` for the errors field instead of `Vec`", + "- Modify the Response struct's 'new' function to accept a `Vec` for the errors parameter and update the construction of diagnostics." 
+ ] + } + }, + "metadata": { + "tags": [ + "cedar", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "cedar" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cedar-policy/cedar/pull/123", + "sourceRepo": "cedar-policy/cedar", + "reactions": 0, + "comments": 29 + }, + "security": { + "scannedAt": "2026-02-27T17:46:49.456Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cert-manager/cert-manager-1084-godaddy-dns-provider.json b/solutions/cncf-generated/cert-manager/cert-manager-1084-godaddy-dns-provider.json new file mode 100644 index 00000000..43a175ee --- /dev/null +++ b/solutions/cncf-generated/cert-manager/cert-manager-1084-godaddy-dns-provider.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:06.416Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cert-manager: Godaddy dns provider", + "description": "**What this PR does / why we need it**: \nAdd Godaddy as a DNS01 provider\n\n**Which issue this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close that issue when PR gets merged)*: fixes #\n\ncloses #1083\n\n**Special notes for your reviewer**:\n\n**Release note**:\n\n```release-note\nAdd Godaddy as a DNS01 provider\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This PR will not be merged into the repo in its current form - we have recently extended out the DNS provider code to allow for *out of tree* DNS providers, as we as a small development team are unable to maintain and manage automated testing for such a large number of DNS01 providers.\n\nMore info can be read here: https://groups.google.com/forum/#!topic/cert-manager-dev/CgoMxSP6DSI\n\nThis information *should* have been posted on this PR around the same time as being posted to the 
mailing list - it appears we missed this one!\n\nI am intending to go through all DNS01 provider PRs over the next week or so, closing them and advising how someone can create their own DNS01 webhook provider. We currently have a working webhook 'example repo' here: https://github.com/jetstack/cert-manager-webhook-example, as well as a test suite that DNS provider developers can run against their own code to ensure their webhook is 'conformant'.\n\nThere will be better documentation produced for the new webhook code over the coming weeks too - this particular feature has been a long time coming, and probably should have been done a long time ago (there are upwards of 15 DNS provider PRs currently open against this repo!)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "cert-manager", + "graduated", + "security", + "size-l", + "release-note", + "needs-rebase", + "area-api", + "kind-documentation", + "needs-ok-to-test", + "area-acme", + "dco-signoff--yes", + "area-acme-dns01" + ], + "category": "security", + "cncfProjects": [ + "cert-manager" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cert-manager/cert-manager/pull/1084", + "sourceRepo": "cert-manager/cert-manager", + "reactions": 8, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:44:06.416Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cert-manager/cert-manager-243-allow-non-static-aws-credentials-for-route-53.json b/solutions/cncf-generated/cert-manager/cert-manager-243-allow-non-static-aws-credentials-for-route-53.json new file mode 100644 index 00000000..99268a30 --- /dev/null +++ b/solutions/cncf-generated/cert-manager/cert-manager-243-allow-non-static-aws-credentials-for-route-53.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": 
"2026-02-27T17:44:12.049Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cert-manager: Allow non-static AWS credentials for Route 53.", + "description": "**What this PR does / why we need it**:\nThis change maintains backwards compatibility, but makes the `accessKeyID` and `secretAccessKeySecretRef` fields of the `route53` DNS provider optional.\n\nIf not provided, AWS credentials will be loaded from `AWS_*` environment variables or the EC2 metadata service. This should also work for things that impersonate the EC2 metadata service, such as [kube2iam](https://github.com/jtblin/kube2iam) and [kail](https://github.com/uswitch/kiam).\n\n**Which issue this PR fixes**: fixes #195\n\n**Special notes for your reviewer**:\nThis change will need some manual merging with https://github.com/jetstack/cert-manager/pull/197 since we both modified the same code.\n\n**Release note**:\n```release-note\nAdd support for AWS credentials loaded from environment variables or the EC2 metadata service.\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "**What this PR does / why we need it**:\n\nIt's not unusual to store the aws access and secret keys together in the\nsame secret under different keys. This allows conveniently using that\nsame secret for both.\n\nI have not yet deployed and manually tested this change, but I plan to fairly soon. Feel free to wait on that.\n\n```release-note\nThe AWS Route53 dns01 provider's Access Key must now be configured as a secret reference. 
If you have any Issuers or CluserIssuers configured with AWS IAM credentials, they **must** be updated to use the new `accessKeyIDSecretRef` field.\n```\n\nNote, this incidentally improves error handling for the other dns providers by refactoring secret-key-data-has-that-key checking into its own function", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "**What this PR does / why we need it**:\r\n\r\nIt's not unusual to store the aws access and secret keys together in the\r\nsame secret under different keys. This allows conveniently using that\r\nsame secret for both.\r\n\r\nI have not yet deployed and manually tested this change, but I plan to fairly soon. Feel free to wait on that." + ] + } + }, + "metadata": { + "tags": [ + "cert-manager", + "graduated", + "security", + "release-note", + "needs-rebase", + "size-m", + "area-acme" + ], + "category": "security", + "cncfProjects": [ + "cert-manager" + ], + "targetResourceKinds": [ + "Service", + "Secret" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cert-manager/cert-manager/pull/243", + "sourceRepo": "cert-manager/cert-manager", + "reactions": 6, + "comments": 23 + }, + "security": { + "scannedAt": "2026-02-27T17:44:12.049Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cert-manager/cert-manager-2840-wip-implement-route-controller.json b/solutions/cncf-generated/cert-manager/cert-manager-2840-wip-implement-route-controller.json new file mode 100644 index 00000000..b956d694 --- /dev/null +++ b/solutions/cncf-generated/cert-manager/cert-manager-2840-wip-implement-route-controller.json @@ -0,0 +1,60 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:10.387Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cert-manager: WIP Implement Route Controller", + 
"description": "**What this PR does / why we need it**:\nThis implements the route controller for openshift routes and allows annotating a route to apply the certificate from the provided secret. If the route API is not found it disables the controller\n\n**Which issue this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close that issue when PR gets merged)*: fixes #1064\n\n**Special notes for your reviewer**:\n\n**Release note**:\n\n```release-note\n```", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "Signed-off-by: raffaelespazzoli \n\n**What this PR does / why we need it**:\nadds injection to openshift routes\nadds ability to present certificates as keystores and javastores\n\n**Which issue this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close that issue when PR gets merged)*: fixes #1064 \n\n**Special notes for your reviewer**:\nThe following is missing:\n1. registering the controllers to the cainjectior manager, need help with that\n2. disabling the route injection controller if routes are not available need .1 first\n3. verify where I added some needed constants (controller/util.go)\n4. unit tests, I didn't have any in my code\n5. documentation.\n\n**Release note**:\n\n```release-note\nadds injection to openshift routes\nadds ability to present certificates as keystores and javastores\n```", + "steps": [ + "registering the controllers to the cainjectior manager, need help with that", + "disabling the route injection controller if routes are not available need .1 first", + "verify where I added some needed constants (controller/util.go)", + "unit tests, I didn't have any in my code", + "documentation." 
+ ], + "codeSnippets": [ + "Signed-off-by: raffaelespazzoli \r\n\r\n\r\n\r\n**What this PR does / why we need it**:\r\nadds injection to openshift routes\r\nadds ability to present certificates as keystores and javastores\r\n\r\n**Which issue this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close that issue when PR gets merged)*: fixes #1064 \r\n\r\n**Special notes for your reviewer**:\r\nThe following is missing:\r\n1. registering the controllers to the cainjectior manager, need help with that\r\n2. disabling the route injection controller if routes are not available need .1 first\r\n3. verify where I added some needed constants (controller/util.go)\r\n4. unit tests, I didn't have any in my code\r\n5. documentation.\r\n\r\n**Release note**:\r\n" + ] + } + }, + "metadata": { + "tags": [ + "cert-manager", + "graduated", + "security", + "do-not-merge-release-note-label-needed", + "needs-rebase", + "do-not-merge-work-in-progress", + "do-not-merge-hold", + "size-xxl", + "dco-signoff--yes", + "area-testing", + "ok-to-test", + "area-deploy", + "needs-kind" + ], + "category": "security", + "cncfProjects": [ + "cert-manager" + ], + "targetResourceKinds": [ + "Secret" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/cert-manager/cert-manager/pull/2840", + "sourceRepo": "cert-manager/cert-manager", + "reactions": 6, + "comments": 34 + }, + "security": { + "scannedAt": "2026-02-27T17:44:10.387Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cert-manager/cert-manager-292-vault-issuer-support.json b/solutions/cncf-generated/cert-manager/cert-manager-292-vault-issuer-support.json new file mode 100644 index 00000000..c40efd11 --- /dev/null +++ b/solutions/cncf-generated/cert-manager/cert-manager-292-vault-issuer-support.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:59.087Z", + "exportedBy": "cncf-mission-generator", + 
"consoleVersion": "auto-generated", + "mission": { + "title": "cert-manager: Vault issuer support", + "description": "**What this PR does / why we need it**:\nInitial Vault support. \n\n**Which issue this PR fixes**\nfixes #17 \n\n**Special notes for your reviewer**:\nWhere I work we need support for Vault so I took a shot at it. Please tell me if you guys are interested in what I did. If yes I can write some documentation and rework the code if need be.\n\nThanks\n\n```release-note\nAdd experimental support for Hashicorp Vault issuers\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "**What this PR does / why we need it**:\n\nIf the CA used is only an intermediate CA, and the root CA is trusted by the client, the client needs help verifying the certificate chain.\n\nThis also makes the CA present in the certificate even if it's the root CA.\n\n**Which issue this PR fixes**:\n\nTrusting certs issued by intermediate CAs used by cert-manager.\n\n**Special notes for your reviewer**:\n\nI have tested this locally with my own intermediate CA used by cert-manager, issued by my own root CA trusted by my macOS client. 
The whole certificate chain is now presented in the browser.\n\nThe idea to just append the certificates is based on cfssl's mkbundle:\nhttps://github.com/cloudflare/cfssl/blob/1.3.0/cmd/mkbundle/mkbundle.go#L97\n\n**Release note**:\n```release-note\nCA Issuer: bundle CA certificate with issued certificates\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "**What this PR does / why we need it**:\r\n\r\nIf the CA used is only an intermediate CA, and the root CA is trusted by the client, the client needs help verifying the certificate chain.\r\n\r\nThis also makes the CA present in the certificate even if it's the root CA.\r\n\r\n**Which issue this PR fixes**:\r\n\r\nTrusting certs issued by intermediate CAs used by cert-manager.\r\n\r\n**Special notes for your reviewer**:\r\n\r\nI have tested this locally with my own intermediate CA used by cert-manager, issued by my own root CA trusted by my macOS client. 
The whole certificate chain is now presented in the browser.\r\n\r\nThe idea to just append the certificates is based on cfssl's mkbundle:\r\nhttps://github.com/cloudflare/cfssl/blob/1.3.0/cmd/mkbundle/mkbundle.go#L97\r\n\r\n**Release note**:" + ] + } + }, + "metadata": { + "tags": [ + "cert-manager", + "graduated", + "security", + "release-note", + "approved", + "lgtm", + "size-xxl" + ], + "category": "security", + "cncfProjects": [ + "cert-manager" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cert-manager/cert-manager/pull/292", + "sourceRepo": "cert-manager/cert-manager", + "reactions": 9, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:43:59.087Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cert-manager/cert-manager-309-rewrite-acme-issuer-for-v2-and-make-validation-process-asynchro.json b/solutions/cncf-generated/cert-manager/cert-manager-309-rewrite-acme-issuer-for-v2-and-make-validation-process-asynchro.json new file mode 100644 index 00000000..852c362e --- /dev/null +++ b/solutions/cncf-generated/cert-manager/cert-manager-309-rewrite-acme-issuer-for-v2-and-make-validation-process-asynchro.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:51.433Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cert-manager: Rewrite ACME issuer for v2, and make validation process asynchronous", + "description": "**What this PR does / why we need it**:\n\nAdds support for the ACME v2 protocol in favour of v1\n\n**Which issue this PR fixes**: fixes #273\n\n**Special notes for your reviewer**:\n\n~This PR is not ready **yet**. We need to validate and ensure boulder runs with the v2 protocol enabled during our e2e tests. 
In future we can switch to something like [pebble](https://github.com/letsencrypt/pebble), a lightweight alternative to boulder.~\n\nI've updated the PR to use Pebble instead of Boulder during e2e tests\n\nI have also copied across the upcoming `golang.org/x/crypto/acme/v2` package from the gerrit codereview so we can get ahead of the curve on testing this: https://go-review.googlesource.com/c/crypto/+/86635\n\n~I have also not tested whether it is possible to obtain wildcard certificates with this **yet**~\nI have managed to obtain a wildcard certificate with this PR!\n\n```\nCertificate:\n Data:\n Version: 3 (0x2)\n Serial Number:\n fa:f5:cc:92:f2:02:15:58:37:05:02:9f:bf:5", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Still TODO: As part of making the authorization process asynchronous we need to make the DNS and HTTP01 challenge solver idempotent (so the names of resources it creates must be looked up instead of stored in memory)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Certificate:\r\n Data:\r\n Version: 3 (0x2)\r\n Serial Number:\r\n fa:f5:cc:92:f2:02:15:58:37:05:02:9f:bf:53:9f:9c:d4:0c\r\n Signature Algorithm: sha256WithRSAEncryption\r\n Issuer: CN=Fake LE Intermediate X1\r\n Validity\r\n Not Before: Mar 23 15:56:39 2018 GMT\r\n Not After : Jun 21 15:56:39 2018 GMT\r\n Subject: CN=*.james-gcp.redacted.net\r\n Subject Public Key Info:", + "Still TODO: As part of making the authorization process asynchronous we need to make the DNS and HTTP01 challenge solver idempotent (so the names of resources it creates must be looked up instead of stored in memory) \n/retest\nCurrently failing due to the aforementioned TODO:", + "Also waiting for this to merge in Pebble: https://github.com/letsencrypt/pebble/pull/94\n> Also waiting for this to merge in Pebble: letsencrypt/pebble#94\r\n\r\n@munnerz That's on my plate for today :+1: \nHey 
@munnerz, I know you've marked this PR as not ready yet, but I was super-keen to try it out, so went ahead and compiled it myself. It didn't work for me, because the `https://acme-staging-v02.api.letsencrypt.org/acme/new-acct` endpoint was returning `HTTP 200`, but with an empty body which caused the decoding at `third_party/crypto/acme/acme.go:556` to fail." + ] + } + }, + "metadata": { + "tags": [ + "cert-manager", + "graduated", + "security", + "release-note", + "approved", + "lgtm", + "size-xxl" + ], + "category": "security", + "cncfProjects": [ + "cert-manager" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/cert-manager/cert-manager/pull/309", + "sourceRepo": "cert-manager/cert-manager", + "reactions": 26, + "comments": 32 + }, + "security": { + "scannedAt": "2026-02-27T17:43:51.433Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cert-manager/cert-manager-3724-istio-virtualservice-for-http01-solving-rebased.json b/solutions/cncf-generated/cert-manager/cert-manager-3724-istio-virtualservice-for-http01-solving-rebased.json new file mode 100644 index 00000000..507de7d0 --- /dev/null +++ b/solutions/cncf-generated/cert-manager/cert-manager-3724-istio-virtualservice-for-http01-solving-rebased.json @@ -0,0 +1,57 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:56.405Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cert-manager: Istio VirtualService for HTTP01 solving (rebased)", + "description": "**This PR is based on this older WIP PR #3011** -> great progress was made here, but PR seems to be stale\n\n**What this PR does / why we need it:**\nAdd support for routing ACME HTTP01 challenges using Istio VirtualService CRs.\n\n**Which issue this PR fixes:**\nfixes #1636 and fixes #2526\n\n```release-note\nAdd support for routing ACME HTTP01 challenges using Istio 
VirtualService CRs.\n```", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "**What this PR does / why we need it**:\nAdd support for routing ACME HTTP01 challenges using Istio VirtualService CRs.\n\n**Which issue this PR fixes**:\nfixes #1636\n\n**Special notes for your reviewer**:\nThis is a PoC. Outstanding TODOs off the top of my head:\n- [x] add unit tests\n- [ ] add e2e test\n- [ ] support cert-manager installation without Istio CRDs\n- [ ] add docs to Issuer CRD\n- [ ] re-enable Ingress based challenge routing\n\n**Release note**:\n\n```release-note\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "**What this PR does / why we need it**:\r\nAdd support for routing ACME HTTP01 challenges using Istio VirtualService CRs.\r\n\r\n**Which issue this PR fixes**:\r\nfixes #1636\r\n\r\n**Special notes for your reviewer**:\r\nThis is a PoC. Outstanding TODOs off the top of my head:\r\n- [x] add unit tests\r\n- [ ] add e2e test\r\n- [ ] support cert-manager installation without Istio CRDs\r\n- [ ] add docs to Issuer CRD\r\n- [ ] re-enable Ingress based challenge routing\r\n\r\n**Release note**:\r\n" + ] + } + }, + "metadata": { + "tags": [ + "cert-manager", + "graduated", + "security", + "release-note", + "approved", + "lgtm", + "area-api", + "kind-feature", + "size-xxl", + "area-acme", + "area-vault", + "dco-signoff--yes", + "area-testing" + ], + "category": "security", + "cncfProjects": [ + "cert-manager" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cert-manager/cert-manager/pull/3724", + "sourceRepo": "cert-manager/cert-manager", + "reactions": 11, + "comments": 21 + }, + "security": { + "scannedAt": "2026-02-27T17:43:56.405Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git 
a/solutions/cncf-generated/cert-manager/cert-manager-3828-feat-add-support-to-secrettemplates.json b/solutions/cncf-generated/cert-manager/cert-manager-3828-feat-add-support-to-secrettemplates.json new file mode 100644 index 00000000..b6fcafe0 --- /dev/null +++ b/solutions/cncf-generated/cert-manager/cert-manager-3828-feat-add-support-to-secrettemplates.json @@ -0,0 +1,55 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:07.677Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cert-manager: feat: add support to secretTemplates", + "description": "Signed-off-by: jonathansp \n\n**What this PR does / why we need it**:\n\nThis change introduces the concept of SecretTemplate for Certificates. When a certificate is issued, a new secret is created to hold the certificate data. This secret is created by cert-manager. In order to use solutions like [kubed](https://appscode.com/products/kubed/v0.12.0/guides/config-syncer/intra-cluster/) to copy this secret to multiple namespaces, this created secret must be annotated. \n\nSecretTemplate is a property of CertificateSpec. Labels and Annotations defined there will be copied to the Secret when required.\n\n**Which issue this PR fixes**:\n\nfixes #2576\n\n**Special notes for your reviewer**:\n\nThis PR is not 100% finished yet. 
After reviewing I will add the documentation of the new feature as well as the release note.\n\n**Release note**:\n\n```release-note\nAdd support for adding custom annotations and labels to the Secret containing the TLS key pair.\n```", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "/milestone Next\n\nI'm adding this to the next milestone during our 2021-05-27 triage session, because it seems like it is close to being mergable and because it solves a much reported issue.\n\nIt also ties in quite closely with https://github.com/jetstack/cert-manager/pull/3537 which makes cert-manager copy the Certificate labels to the Secret.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "cert-manager", + "graduated", + "security", + "release-note", + "size-xl", + "approved", + "lgtm", + "area-api", + "kind-feature", + "dco-signoff--yes", + "area-testing", + "ok-to-test", + "area-deploy" + ], + "category": "security", + "cncfProjects": [ + "cert-manager" + ], + "targetResourceKinds": [ + "Secret", + "Namespace" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cert-manager/cert-manager/pull/3828", + "sourceRepo": "cert-manager/cert-manager", + "reactions": 8, + "comments": 17 + }, + "security": { + "scannedAt": "2026-02-27T17:44:07.677Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cert-manager/cert-manager-3931-added-poddisruptionbudgets-to-helm-chart.json b/solutions/cncf-generated/cert-manager/cert-manager-3931-added-poddisruptionbudgets-to-helm-chart.json new file mode 100644 index 00000000..8e050343 --- /dev/null +++ b/solutions/cncf-generated/cert-manager/cert-manager-3931-added-poddisruptionbudgets-to-helm-chart.json @@ -0,0 +1,57 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:52.614Z", + "exportedBy": 
"cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cert-manager: Added PodDisruptionBudgets to helm chart", + "description": "> 📢 **Update 2023-10-23 - @wallrj - Documentation**\n> For anyone interested in this feature please read the [High Availability section of the cert-manager Best Practice guide](https://cert-manager.io/docs/installation/best-practice/#high-availability) and provide any feed back and improvements via the [cert-manager website repo](https://github.com/cert-manager/website). Thanks!\n\n**What this PR does / why we need it**:\n\nAdded PodDisruptionBudgets to helm chart. It is configurable via the values.yaml.\n\n**Which issue this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close that issue when PR gets merged)*: fixes #3898\n\n**Special notes for your reviewer**:\n\n**Release note**:\n\n```release-note\nHelm: Added PodDisruptionBudgets for cert-manager components to the Helm chart (disabled by default).\n```", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "If we are not providing sensible defaults for PDB values, I'm not sure I see the value in us including this option in the chart (given it's entirely user configurable at the moment) - I'd expect that if we do create a PDB, we use values that makes sense for the various components of the project. As-is, this is just a thin wrapper around the PDB API with less flexibility than if users just manually created these resources.\n\nWDYT?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Please add support of new apiVersion for PDB. https://github.com/kubernetes/kubernetes/blob/v1.21.3/CHANGELOG/CHANGELOG-1.21.md?plain=1#L602\r\n\r\n@munnerz @irbekrm any update on your side about status of this PR?\n@z0rc As far as I found out policy/v1 is only available starting with v1.21. 
I think we should still support older k8s versions than that.\n@e96wic for this purpose helm provides `.Capabilities.APIVersions.Has` function, that can be used something like this:", + "> @z0rc As far as I found out policy/v1 is only available starting with v1.21. I think we should still support older k8s versions than that.\r\n\r\nThe supported k8s versions we target today are 1.16-1.22, meaning that yes, we should continue to use `v1beta1` and not move to `v1` yet. This is a similar pattern to how we've handled other API deprecations in the project.\r\n\r\n> @e96wic for this purpose helm provides .Capabilities.APIVersions.Has function, that can be used something like this:\r\n\r\nI don't think we should do this - it doesn't really bring any benefits for the k8s versions we supported, and additionally we'll end out needing many different 'switches' if the actual PDB schema has changed between v1beta1 and v1.\r\n\r\nWe can keep using v1beta1 until v1.21 is the minimum supported version (i.e. in approximately a years time).\n> if the actual PDB schema has changed between v1beta1 and v1\r\n\r\nIt didn't, check the link provided." 
+ ] + } + }, + "metadata": { + "tags": [ + "cert-manager", + "graduated", + "security", + "size-l", + "release-note", + "approved", + "lgtm", + "kind-feature", + "dco-signoff--yes", + "ok-to-test", + "area-deploy" + ], + "category": "security", + "cncfProjects": [ + "cert-manager" + ], + "targetResourceKinds": [ + "Pod", + "Poddisruptionbudget" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cert-manager/cert-manager/pull/3931", + "sourceRepo": "cert-manager/cert-manager", + "reactions": 16, + "comments": 74 + }, + "security": { + "scannedAt": "2026-02-27T17:43:52.614Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cert-manager/cert-manager-4793-use-multivalue-records-instead-of-simple-records.json b/solutions/cncf-generated/cert-manager/cert-manager-4793-use-multivalue-records-instead-of-simple-records.json new file mode 100644 index 00000000..a1ef0919 --- /dev/null +++ b/solutions/cncf-generated/cert-manager/cert-manager-4793-use-multivalue-records-instead-of-simple-records.json @@ -0,0 +1,56 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:02.940Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cert-manager: Use multivalue records instead of simple records ", + "description": "Use multivalue records instead of simple records for the AWS Route53 ACME DNS challenge solver.\nWhen using simple records, it would be impossible to create a second record in a Route53 hosted zone with the same name/type.\n\nThis way when either using the same domain name on multiple clusters or using the same CNAME record for multiple domains would create a new challenge instead of overwriting the other challenges.\n\n### Pull Request Motivation\nFix https://github.com/jetstack/cert-manager/issues/3460\n\n### Kind\nbug\n\n### Release Note\n\n```release-note\nUse multivalue records instead of simple records 
for the AWS Route53 ACME DNS challenge solver, to allow for multiple challenges for the same domain at the same time\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "### Pull Request Motivation\nThere is a race condition where multiple cert-managers in different clusters try to validate the ownership of a same domain using DNS01 challenge, when issuing certificates that have the same commonName or overlapping dnsNames.\n\nFor instance, imaging below certificates are simultaneously submitted to its corresponding cluster:\nCert applied to cluster-1\n```yaml\napiVersion: cert-manager.io/v1\nkind: Certificate\nmetadata:\n name: cert-for-cluster-1\nspec:\n commonName: app.cluster-1.example.com\n dnsNames:\n - app.example.com\n issuerRef:\n kind: ClusterIssuer\n name: letsencrypt\n```\nCert applied to cluster-2\n```yaml\napiVersion: cert-manager.io/v1\nkind: Certificate\nmetadata:\n name: cert-for-cluster-2\nspec:\n commonName: app.cluster-2.example.com\n dnsNames:\n - app.example.com\n issuerRef:\n kind: ClusterIssuer\n name: letsencrypt\n```\nWhen using DNS01 challenge to validate the ownership of `app.example.com`, Let's Encrypt will handout different tokens for each cluster/cert-manager. The happy path would be: `UPDATE_CNAME_FOR_CLUSTER1 -> VALIDATED_IN_CLUSTER1 -> UPDATE_CNAME_FOR_CLUSTER2 -> VALIDATED_IN_CLUSTER2`. The race condition, however, happens when the CNAME record value got updated before cert-manager (let's encrypt) in the first cluster finishing validating. In this case, the challenge for `app.example.com` will stuck in `pending` state in cluster-1 forever, result in certs won't be issued. 
Currently, someone need to manually delete", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "### Pull Request Motivation\r\nThere is a race condition where multiple cert-managers in different clusters try to validate the ownership of a same domain using DNS01 challenge, when issuing certificates that have the same commonName or overlapping dnsNames.\r\n\r\nFor instance, imaging below certificates are simultaneously submitted to its corresponding cluster:\r\nCert applied to cluster-1", + "Cert applied to cluster-2", + "When using DNS01 challenge to validate the ownership of `app.example.com`, Let's Encrypt will handout different tokens for each cluster/cert-manager. The happy path would be: `UPDATE_CNAME_FOR_CLUSTER1 -> VALIDATED_IN_CLUSTER1 -> UPDATE_CNAME_FOR_CLUSTER2 -> VALIDATED_IN_CLUSTER2`. The race condition, however, happens when the CNAME record value got updated before cert-manager (let's encrypt) in the first cluster finishing validating. In this case, the challenge for `app.example.com` will stuck in `pending` state in cluster-1 forever, result in certs won't be issued. Currently, someone need to manually delete the stuck challenge so cert-manager can update the DNS record again to complete the challenge validation.\r\n\r\nTherefore, we would like to add a new feature so the system can self-heal and become eventually consistent on its own. 
Besides, it can also be used to solve other general stuck challenge issue.\r\n\r\n#### Feature Description\r\n\r\n- A new argument `dns01-check-recreate-period` is added to allow setting a deadline before the _pending_ challenge gets deleted and recreated.\r\n- The default value is 0 which indicates never auto-delete any pending challenge, so this feature is fully backward compatible\r\n- If DNS01 check recreation is enabled and the pending challenge's creation time passed the deadline, the syncer will delete the challenge automatically. The challenge will be created by other parts of cert-manager so DNS record for the challege can be update the proper value hence complete the validation.\r\n\r\n### Kind\r\nfeature\r\n\r\n### Release Note" + ] + } + }, + "metadata": { + "tags": [ + "cert-manager", + "graduated", + "security", + "release-note", + "approved", + "lgtm", + "size-s", + "kind-bug", + "area-acme", + "dco-signoff--yes", + "ok-to-test", + "area-acme-dns01" + ], + "category": "security", + "cncfProjects": [ + "cert-manager" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cert-manager/cert-manager/pull/4793", + "sourceRepo": "cert-manager/cert-manager", + "reactions": 8, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:44:02.940Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cert-manager/cert-manager-483-dnsimple-dns-issuer.json b/solutions/cncf-generated/cert-manager/cert-manager-483-dnsimple-dns-issuer.json new file mode 100644 index 00000000..de678d51 --- /dev/null +++ b/solutions/cncf-generated/cert-manager/cert-manager-483-dnsimple-dns-issuer.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:57.486Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cert-manager: DNSimple DNS Issuer", + "description": "**What 
this PR does / why we need it**:\nAdds a DNS issuer for DNSimple\n\n**Which issue this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close that issue when PR gets merged)*: fixes #472 \n\n**Special notes for your reviewer**:\n\n**Release note**:\n\n```release-note\nAdd DNSimple as a DNS Issuer\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/ok-to-test", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "cert-manager", + "graduated", + "security", + "release-note", + "needs-rebase", + "size-xxl", + "area-acme" + ], + "category": "security", + "cncfProjects": [ + "cert-manager" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cert-manager/cert-manager/pull/483", + "sourceRepo": "cert-manager/cert-manager", + "reactions": 10, + "comments": 17 + }, + "security": { + "scannedAt": "2026-02-27T17:43:57.486Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cert-manager/cert-manager-5084-wip-configurable-context-timeout.json b/solutions/cncf-generated/cert-manager/cert-manager-5084-wip-configurable-context-timeout.json new file mode 100644 index 00000000..d4449f63 --- /dev/null +++ b/solutions/cncf-generated/cert-manager/cert-manager-5084-wip-configurable-context-timeout.json @@ -0,0 +1,53 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:54.644Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cert-manager: WIP: configurable context timeout", + "description": "### Pull Request Motivation\n\nThe initial issue is slow responding ZeroSSL ACME api. 
So most of the time cert-manager is not able to register.\n\n```\ncert-manager/controller/clusterissuers \"msg\"=\"failed to register an ACME account\" \"error\"=\"context deadline exceeded\"\n```\n\nThe root cause is already reported to ZeroSSL support but overall the fixed value context timeout of 10 seconds is not ideal when it come to edge-clusters with not reliable or slow connection.\n\nThis PR should:\n- fix #5080 \n- fix cert-manager/website#583\n\nQuestion to maintainer: Should this be back-ported to previous versions?\n\n### Kind\n\nfeature\n\n### Release Note\n\n```release-note\nAdded context-timeout flag for controller to support slow responding ACME apis\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "> This PR compliments #5084, as this seems to be a stale PR.\n### Pull Request Motivation\nSome ACME providers take a long time to respond, where a hardcoded 10 seconds timeout is insufficient to request certificates, which results in a context deadline exceeded.\n\nThis PR should fix:\n* #5080 \n* cert-manager/website#583\n\n### Kind\nfeature\n\n### Release Note\n\n```release-note\nAdded IssuerSetupTimeout flag for controller to override context timeout for slow responding ACME API's\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "cert-manager/controller/clusterissuers \"msg\"=\"failed to register an ACME account\" \"error\"=\"context deadline exceeded\"", + "> This PR compliments #5084, as this seems to be a stale PR.\r\n### Pull Request Motivation\r\nSome ACME providers take a long time to respond, where a hardcoded 10 seconds timeout is insufficient to request certificates, which results in a context deadline exceeded.\r\n\r\nThis PR should fix:\r\n* #5080 \r\n* cert-manager/website#583\r\n\r\n### Kind\r\nfeature\r\n\r\n### Release Note\r\n\r\n", + "Hi @fatz. 
Thanks for your PR.\n\nI'm waiting for a [cert-manager](https://github.com/orgs/cert-manager/people) member to verify that this patch is reasonable to test. If it is, they should reply with `/ok-to-test` on its own line. Until that is done, I will not automatically test new commits in this PR, but the usual testing commands by org members will still work. Regular contributors should [join the org](https://github.com/orgs/cert-manager/people) to skip this step.\n\nOnce the patch is verified, the new status will be reflected by the `ok-to-test` label.\n\nI understand the commands that are listed [here](https://go.k8s.io/bot-commands?repo=cert-manager%2Fcert-manager).\n\n
\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n
\n\n[APPROVALNOTIFIER] This PR is **NOT APPROVED**\n\nThis pull-request has been approved by: *fatz*\nTo complete the [pull request process](https://git.k8s.io/community/contributors/guide/owners.md#the-code-review-process), please assign **jakexks** after the PR has been reviewed.\nYou can assign the PR to them by writing `/assign @jakexks` in a comment when ready.\n\nThe full list of commands accepted by this bot can be found [here](https://go.k8s.io/bot-commands?repo=cert-manager%2Fcert-manager).\n\n
\nNeeds approval from an approver in each of these files:\n\n- **[OWNERS](https://github.com/cert-manager/cert-manager/blob/master/OWNERS)**\n\nApprovers can indicate their approval by writing `/approve` in a comment\nApprovers can cancel approval by writing `/approve cancel` in a comment\n
\n\n/kind feature\r\n/ok-to-test\n@fatz: The following tests **failed**, say `/retest` to rerun all failed tests or `/retest-required` to rerun all mandatory failed tests:\n\nTest name | Commit | Details | Required | Rerun command\n--- | --- | --- | --- | ---\npull-cert-manager-make-e2e-v1-23 | 657c674a60457c1c35ae490d344dbfa417ffd5b7 | [link](https://prow.build-infra.jetstack.net/view/gs/jetstack-logs/pr-logs/pull/cert-manager_cert-manager/5084/pull-cert-manager-make-e2e-v1-23/1522595901862842368) | true | `/test pull-cert-manager-make-e2e-v1-23`\npull-cert-manager-make-test | 657c674a60457c1c35ae490d344dbfa417ffd5b7 | [link](https://prow.build-infra.jetstack.net/view/gs/jetstack-logs/pr-logs/pull/cert-manager_cert-manager/5084/pull-cert-manager-make-test/1522595901619572736) | true | `/test pull-cert-manager-make-test`\npull-cert-manager-bazel | 657c674a60457c1c35ae490d344dbfa417ffd5b7 | [link](https://prow.build-infra.jetstack.net/view/gs/jetstack-logs/pr-logs/pull/cert-manager_cert-manager/5084/pull-cert-manager-bazel/1522595901573435392) | true | `/test pull-cert-manager-bazel`\npull-cert-manager-e2e-v1-23 | 657c674a60457c1c35ae490d344dbfa417ffd5b7 | [link](https://prow.build-infra.jetstack.net/view/gs/jetstack-logs/pr-logs/pull/cert-manager_cert-manager/5084/pull-cert-manager-e2e-v1-23/1522595901799927808) | true | `/test pull-cert-manager-e2e-v1-23`\n\n[Full PR test history](https://prow.build-infra.jetstack.net/pr-history?org=cert-manager&repo=cert-manager&pr=5084). [Your PR dashboard](https://jetstack-build-infra.appspot.com/pr/fatz). Please help us cut down on flakes by [linking to](https://git.k8s.io/community/contributors/devel/sig-testing/flaky-tests.md#github-issues-for-known-flakes) an [open issue](https://github.com/cert-manager/cert-manager/issues?q=is:issue+is:open) when you hit one in your PR.\n\n
\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository. I understand the commands that are listed [here](https://go.k8s.io/bot-commands).\n
\n\n> I also think we could maybe change the name from \"ContextTimeout\" to something like \"IssuerSetupTimeout\" so that it's super specific what the timeout we're configuring does 👍\r\n\r\nYeah sounds good. But I'm not 100% sure if its only this specific timeout\n- Push - \r\nStale since 10 days, sadly don't know how to work on the Repo otherwise I'd contrib. - Issue still persists. \nI've been using this patch for a few days now, but it as is, does not solve entirely the problem (at least in my case, using `zerossl` as the ACME provider).\r\n\r\nI also had to increase the timeout in the `pkg/acme/client/middleware/logger.go`, as I noticed it was failing at the beginning of the registration:" + ] + } + }, + "metadata": { + "tags": [ + "cert-manager", + "graduated", + "security", + "release-note", + "do-not-merge-work-in-progress", + "kind-feature", + "size-m", + "dco-signoff--yes", + "ok-to-test" + ], + "category": "security", + "cncfProjects": [ + "cert-manager" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/cert-manager/cert-manager/pull/5084", + "sourceRepo": "cert-manager/cert-manager", + "reactions": 13, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:43:54.644Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cert-manager/cert-manager-5225-add-flag-to-allow-switching-ingressclassname-specification.json b/solutions/cncf-generated/cert-manager/cert-manager-5225-add-flag-to-allow-switching-ingressclassname-specification.json new file mode 100644 index 00000000..b91d21b3 --- /dev/null +++ b/solutions/cncf-generated/cert-manager/cert-manager-5225-add-flag-to-allow-switching-ingressclassname-specification.json @@ -0,0 +1,60 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:00.164Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": 
"cert-manager: Add flag to allow switching ingressClassName specification", + "description": "### Pull Request Motivation\n\nIn order to support both the legacy annotation and the new ingressClassName of Ingress v1, a flag has been added to allow switching. This should allow users with clusters supporting only ingressClassName to use the latest version, while being fully backwards compatible with existing users that use clusters that do not support ingressClassName.\n\nCloses: #4821 \n\n### Kind\n\nfeature\n\n### Release Note\n\n```release-note\nAdd new flag `acme-http01-solver-use-ingress-class-name` to allow switching between the legacy annotation and new ingressClassName field.\n```", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "Hi! I'll be spending time reviewing this PR today.\n\n@alijahnas A few questions:\n\n1. Which version of ingress-nginx are you using?\n2. What is the value of your Issuer's `spec.acme.solvers.http01.ingress.class`?\n3. Do you use the flag `--ingress-class` on your ingress-nginx deployment?\n\nCheck that the value in `spec.acme.solvers.http01.ingress.class` matches the value in `--ingress-class`.\n\nNote that the flag `--ingress-class` doesn't refer to an IngressClass resource. The flag `--ingress-class` refers to the value of the annotation `kubernetes.io/ingress.class`.", + "steps": [ + "Which version of ingress-nginx are you using?", + "What is the value of your Issuer's `spec.acme.solvers.http01.ingress.class`?", + "Do you use the flag `--ingress-class` on your ingress-nginx deployment?" + ], + "codeSnippets": [ + "Hi @dsonck92. Thanks for your PR.\n\nI'm waiting for a [cert-manager](https://github.com/orgs/cert-manager/people) member to verify that this patch is reasonable to test. If it is, they should reply with `/ok-to-test` on its own line. Until that is done, I will not automatically test new commits in this PR, but the usual testing commands by org members will still work. 
Regular contributors should [join the org](https://github.com/orgs/cert-manager/people) to skip this step.\n\nOnce the patch is verified, the new status will be reflected by the `ok-to-test` label.\n\nI understand the commands that are listed [here](https://go.k8s.io/bot-commands?repo=cert-manager%2Fcert-manager).\n\n
\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n
\n\nHi, thanks for the PR @dsonck92 !\r\n\r\nI've not looked at the contents yet, in general the idea of a flag seems not bad if it solves people's problems.\r\n\r\nThere was a lengthy discussion about this and around how downstream projects like us and ingress controllers should approach the new field vs annotation and I know that at least ingress-nginx folks rolled back a bunch of deprecations in regards to the class name field vs annotation as an outcome (same as us).\r\n\r\nTo help us understand the user value of this flag, could you please provide one or two scenarios where the latest cert-manager release does not work for you (i.e which ingress controller and which version of it and why, if you know that)?\r\n(I'm aware that there are a bunch of comments on the original issue, but most of them don't have enough detail)\n@irbekrm Well, it turns out I could bump the version of my traefik install enough to the point where I could specify the class name as suggested by cert-manager (the value to tweak this setting was added to the helm chart), and this did fix the issue of traefik not picking up the ingresses with only an annotation.\r\n\r\nSo as far as my own problem, this is actually solved. However, looking back at the whole crisis going on with terraform-kubernetes-provider for lack of support of v1 ingresses, I do have the opinion that giving the end user choice of going full v1 if possible is definitely a plus. In my eyes, the implementation of the flag was relatively straight forward (not sure why it's classed size/M) and documented so it adds the option to go full v1 without annotations. 
But I do understand that I won't be the one that maintains this code in the long run.\nThanks @dsonck92 appreciate that the current situation is generally suboptimal as it is also not clear if there ever will be a migration path off the annotation or if the ingress controllers and us will have to keep supporting this 'forever' in which case we'd then have two mechanisms to maintain 'forever' instead of one with this PR- that is my main concern. \r\n\r\nFor now, I'd probably keep this open for a bit to see if any more use cases appear - for now I am not convinced that there is enough value for users to justify this.\nSo what is the current workaround now? I have 2 ingress controllers, and solvers for one are not being picked up.\r\nSetting edit in place gives me cryptic:", + "cert-manager has both ingress controllers set in solvers settings. I am using `ingressClassName` field in spec, as recommended by `networking.k8s.io/v1` resource version.\r\n\r\nVersions:\r\nhelm chart `nginx-ingress-0.14.0` AppVersion `2.3.0`\r\nhelm chart `cert-manager-v1.7.1` AppVersion `v1.7.1`\r\nK8s Rev: `v1.21.9`\nI came across this as I'm having similar issues. cert-manager 1.9.1 and nginx 1.3.1. None of my renewals work anymore without me going into the ingress and setting ingressClassName on the spec. The annotation is not working anymore. Would love to know what the intended solution is.\n> For now, I'd probably keep this open for a bit to see if any more use cases appear - for now I am not convinced that there is enough value for users to justify this.\r\n\r\nOne use-case is newer ingress controllers that do not support the ingress annotation, like the ingress controller in Cilium. In my case, using the ingress controller is Cilium easier as I'm already running Cilium, so I don't need to install a extra component to handle ingresses.\nWhy do we need a flag to switch? 
Is it not possible to have both fields simultaneously?\r\n\r\nWe can have a feature flag; when enabled, the cert-manager adds `ingressClassName` in addition to the classic annotations. With this approach, backward compatibility is not compromised. We can plan for deprecating the classic annotations at a later point in time. Does this make sense?\nAs the annotation is deprecated, why not go with the initial solution from https://github.com/cert-manager/cert-manager/issues/4821? \r\n\r\nJust add", + "It's non breaking, easy to upgrade. \r\n\r\n\nHi, we have the same issue as @markgould. The annotation doesn't work, and the ingress controller tells us that : error=\"ingress class annotation is not equal to the expected by Ingress Controller\"\r\n\r\nWe need to use IngressClassName in the ingress objects created by cert-manager.\r\n\r\nWhen do you think this will be available ? Thanks.\r\n\r\n\r\n\r\n\nHi! I'll be spending time reviewing this PR today.\r\n\r\n@alijahnas A few questions:\r\n\r\n1. Which version of ingress-nginx are you using?\r\n2. What is the value of your Issuer's `spec.acme.solvers.http01.ingress.class`?\r\n3. Do you use the flag `--ingress-class` on your ingress-nginx deployment?\r\n\r\nCheck that the value in `spec.acme.solvers.http01.ingress.class` matches the value in `--ingress-class`.\r\n\r\nNote that the flag `--ingress-class` doesn't refer to an IngressClass resource. The flag `--ingress-class` refers to the value of the annotation `kubernetes.io/ingress.class`.\nHi @maelvls \r\nWe use ingress version 1.5.1, and cert-manager 1.9.1.\r\nThanks a lot !\r\n\nHey @dsonck92. Thank you so much for the PR!\r\n\r\nUsing a flag to \"toggle\" between the annotation mode and the ingressClassName mode seemed OK to me at first, but I see two problems:\r\n\r\n- **Validation Consistency**: the annotation value for `kubernetes.io/ingress.class` that you can set with `http01.ingress.class` can be any string. 
On the other side, `ingressClassName` must only be a DNS label. A notable example is the Azure AGIC ingress controller that uses the annotation value `azure/application-gateway`. With `--acme-http01-solver-use-ingress-class-name=false`, no validation issue will be found. With `--acme-http01-solver-use-ingress-class-name`, the ingress controller will start failing.\r\n- **Incompatible Ingress Controllers:** if I use an ingress controller that only supports annotations (ingress-gce is the only example) and I also want to use ingressClassName with another ingress controller, then this flag won't be useful.\r\n\r\nWhat do you think about adding a new field on the Issuer and ClusterIssuer CRD instead of using a flag?\r\n\r\nFor example:" + ] + } + }, + "metadata": { + "tags": [ + "cert-manager", + "graduated", + "security", + "size-l", + "release-note", + "needs-rebase", + "area-api", + "area-acme", + "dco-signoff--yes", + "ok-to-test", + "area-acme-http01", + "area-deploy", + "needs-kind" + ], + "category": "security", + "cncfProjects": [ + "cert-manager" + ], + "targetResourceKinds": [ + "Ingress" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cert-manager/cert-manager/pull/5225", + "sourceRepo": "cert-manager/cert-manager", + "reactions": 9, + "comments": 17 + }, + "security": { + "scannedAt": "2026-02-27T17:44:00.164Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cert-manager/cert-manager-649-adds-kubernetes-authentication-type-for-vault-issuer.json b/solutions/cncf-generated/cert-manager/cert-manager-649-adds-kubernetes-authentication-type-for-vault-issuer.json new file mode 100644 index 00000000..e0b551a7 --- /dev/null +++ b/solutions/cncf-generated/cert-manager/cert-manager-649-adds-kubernetes-authentication-type-for-vault-issuer.json @@ -0,0 +1,59 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:04.088Z", + "exportedBy": 
"cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cert-manager: Adds Kubernetes authentication type for Vault Issuer", + "description": "**What this PR does / why we need it**:\n\n**Which issue this PR fixes**\nfixes #647\n\n**Special notes for your reviewer**:\nSo I've actually put together a patch, and just trying to tie up the e2e tests. This requires that the VaultInitializer have a method that configures the kubernetes auth backend which requires at a minimum: the master url and CA cert.\n\nAnyone happen to know what the right way to get these in there are?\n\n**Release note**:\n```release-note\nAdds Kubernetes authentication type for Vault Issuer\n\nThis auth method uses the cert-manager's service account token to request a Vault token. See the Vault documentation for instructions on setting up the auth backend as well as the necessary role bindings on the Kubernetes side.\n\nExample configuration for using this auth method:\n\napiVersion: certmanager.k8s.io/v1alpha1\nkind: Issuer\nmetadata:\n name: vault-issuer\n namespace: default\nspec:\n vault:\n path: pki_int/sign/example-dot-com\n server: https://vault\n auth:\n kubern", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "/ok-to-test", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "/ok-to-test\nAre there any guides on how to work with the e2e tests? For testing a new auth backend, the Vault instance needs to have the backend enabled and configured--which requires the k8s master url and CA cert. 
It wasn't clear from the current setup if there's any way to get those in the VaultInitializer.\n@kunickiaj Just a suggestion, would be nice to have the kubernetes auth mount path configurable, just like this PR is trying to do for approle https://github.com/jetstack/cert-manager/pull/612\r\n\r\nAt the moment it is hardcoded to `/auth/kubernetes`\nSeems like a reasonable request--right now still stuck on how to make this testable given the dependency on k8s cluster parameters. Any ideas on how the test jobs are setup and how this could be done would be welcome.\n/assign @kragniz\r\n\r\nCould you take a look at this and help out with the testing environment pieces?\n@kunickiaj sorry for the lack of response here - could you expand a bit more on the issues you're encountering with the test environment?\r\n\r\nIf you can rebase your PR, we should be able to get these e2e's running (and hopefully passing too!). There's not too much special about the environment (it is provisioned with minikube at the moment) - what specified requirements of that environment do you have? 😄 \nI'll get it rebased today. For the e2e environment, the Vault auth backend for k8s needs to have two parameters provided at a minimum: k8s master url and CA cert for the master. 
If its minikube hopefully that's fairly easy to make deterministic.\nSorry for the delay, rebased the patch, still needs e2e setup.\n> This requires that the VaultInitializer have a method that configures the kubernetes auth backend which requires at a minimum: the master url and CA cert.\r\n>\r\n> Anyone happen to know what the right way to get these in there are?\r\n\r\n[banzaicloud/bank-vaults](https://github.com/banzaicloud/bank-vaults) is a really good Vault Operator that has an automatic configuration toolset\r\n\r\n> **Full disclosure:** I'm a bank-vaults contributor\n@kunickiaj \r\n\r\nI didn't experiment with this Vault auth method much yet but maybe you could use the helper method in test/e2e/framework/util.go to fetch the config:", + "Would it give you what you need to setup the Kubernetes auth backend in Vault afterwards?\nIs there anything we can do to get this feature in the near future? It would be extremely useful to have native k8s auth to vault.\nFriendly ping @kunickiaj - are you able to rebase this pull request and address the review comments? 
😄 \nJust did a quick rebase, looking at the review comments now.\n@vdesjardins\r\n\r\n> \r\n> I didn't experiment with this Vault auth method much yet but maybe you could use the helper method in test/e2e/framework/util.go to fetch the config:\r\n> \r\n>" + ] + } + }, + "metadata": { + "tags": [ + "cert-manager", + "graduated", + "security", + "release-note", + "size-xl", + "needs-rebase", + "area-api", + "kind-documentation", + "area-vault", + "dco-signoff--yes", + "area-testing", + "area-deploy" + ], + "category": "security", + "cncfProjects": [ + "cert-manager" + ], + "targetResourceKinds": [ + "Service", + "Namespace", + "Role" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cert-manager/cert-manager/pull/649", + "sourceRepo": "cert-manager/cert-manager", + "reactions": 8, + "comments": 35 + }, + "security": { + "scannedAt": "2026-02-27T17:44:04.088Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cert-manager/cert-manager-6755-bugfix-wrong-certificate-chain-is-used-if-preferredchain-is-co.json b/solutions/cncf-generated/cert-manager/cert-manager-6755-bugfix-wrong-certificate-chain-is-used-if-preferredchain-is-co.json new file mode 100644 index 00000000..b1f8f6fb --- /dev/null +++ b/solutions/cncf-generated/cert-manager/cert-manager-6755-bugfix-wrong-certificate-chain-is-used-if-preferredchain-is-co.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:01.362Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cert-manager: bugfix: wrong certificate chain is used if preferredChain is configured", + "description": "### Pull Request Motivation\n\nDue to [recent change of Let's Encrypt Chain of Trust](https://letsencrypt.org/2023/07/10/cross-sign-expiration), the short certificate chain (having ISRG Root X1 as root certificate) became the default certificate provided for 
`/acme/certificate` API endpoint. Old long certificate chain, which is cross-signed with DST Root CA X3, is now served as an alternate certificate chain.\n\nCaused by this change, if `preferredChain` is configured as `ISRG Root X1` in Issuer or ClusterIssuer, cert-manager now returns a long-chain certificate, cross-signed by DST Root CA, since the current implementation does not include default certificate chain while looking for preferred chain.\n\nGoing worse, this does not fall back to the default chain but returns the cross-signed chain since that chain also includes `ISRG Root CA X1` as intermediate CA. \n\nThis PR addresses the issue by \n\n1. Including default certificate chain while evaluating preferred certificate bundle\n2. Checking", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/ok-to-test", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "cert-manager", + "graduated", + "security", + "size-l", + "release-note", + "approved", + "lgtm", + "area-api", + "kind-bug", + "area-acme", + "dco-signoff--yes", + "area-testing", + "ok-to-test" + ], + "category": "security", + "cncfProjects": [ + "cert-manager" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/cert-manager/cert-manager/pull/6755", + "sourceRepo": "cert-manager/cert-manager", + "reactions": 9, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:44:01.362Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cert-manager/cert-manager-713-add-cert-manager-specific-metrics-to-prometheus-endpoint.json b/solutions/cncf-generated/cert-manager/cert-manager-713-add-cert-manager-specific-metrics-to-prometheus-endpoint.json new file mode 100644 index 00000000..dcbe5b5a --- /dev/null +++ 
b/solutions/cncf-generated/cert-manager/cert-manager-713-add-cert-manager-specific-metrics-to-prometheus-endpoint.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:08.663Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cert-manager: Add cert-manager specific metrics to Prometheus endpoint", + "description": "**What this PR does / why we need it**: \n\n**Which issue this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close that issue when PR gets merged)*: fixes #55 \n\n**Special notes for your reviewer**: this is a rebase + refactor of #225\n\n**Release note**:\n\n```release-note\nAdd cert-manager specific Prometheus metrics\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "The dep changes here look wrong - could you take a look?\n\nUse `./hack/update-deps.sh` to update dependencies.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "The dep changes here look wrong - could you take a look?\r\n\r\nUse `./hack/update-deps.sh` to update dependencies.\n(+ also do it as a rebase if possible, so the history isn't all weird) 😄 \n`dep ensure` went a bit mad\nit looks a bit more sensible now\ntested with a CA issuer, seems to work okay:", + "And for an issuer that fails:" + ] + } + }, + "metadata": { + "tags": [ + "cert-manager", + "graduated", + "security", + "release-note", + "approved", + "lgtm", + "size-xxl", + "dco-signoff--yes" + ], + "category": "security", + "cncfProjects": [ + "cert-manager" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cert-manager/cert-manager/pull/713", + "sourceRepo": "cert-manager/cert-manager", + "reactions": 7, + "comments": 16 + }, + "security": { + "scannedAt": "2026-02-27T17:44:08.663Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, 
+ "findings": [] + } +} diff --git a/solutions/cncf-generated/cert-manager/cert-manager-7549-chore-issuer-cloudflare-ensure-we-set-zoneid.json b/solutions/cncf-generated/cert-manager/cert-manager-7549-chore-issuer-cloudflare-ensure-we-set-zoneid.json new file mode 100644 index 00000000..79260e8a --- /dev/null +++ b/solutions/cncf-generated/cert-manager/cert-manager-7549-chore-issuer-cloudflare-ensure-we-set-zoneid.json @@ -0,0 +1,54 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:50.306Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cert-manager: chore(issuer/cloudflare): ensure we set ZoneID", + "description": "Cloudflare have stopped including zone IDs in their record responses now, 2 months after they said they did and with their trademark zero effort in outreach to consumers of their API. Ensure that findTxtRecord returns a record struct with the zone ID set regardless.\n\nFixes #7540\n\n### Pull Request Motivation\n\nMake issuing certificates using DNS01 challenges work on Cloudflare again.\n\n### Kind\n\n/kind bug\n\n### Release Note\n\n```release-note\nFix issuing of certificates via DNS01 challenges on Cloudflare after a breaking change to the Cloudflare API\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This is an automated cherry-pick of #7549\n\n/assign SgtCoDFish\n\n```release-note\nFix issuing of certificates via DNS01 challenges on Cloudflare after a breaking change to the Cloudflare API\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "This is an automated cherry-pick of #7549\r\n\r\n/assign SgtCoDFish" + ] + } + }, + "metadata": { + "tags": [ + "cert-manager", + "graduated", + "security", + "size-xs", + "release-note", + "approved", + "lgtm", + "kind-bug", + "area-acme", + "dco-signoff--yes", + "ok-to-test", + "area-acme-dns01" + ], + 
"category": "security", + "cncfProjects": [ + "cert-manager" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cert-manager/cert-manager/pull/7549", + "sourceRepo": "cert-manager/cert-manager", + "reactions": 54, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:43:50.306Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cert-manager/cert-manager-920-add-dns01-provider-for-dnsimple.json b/solutions/cncf-generated/cert-manager/cert-manager-920-add-dns01-provider-for-dnsimple.json new file mode 100644 index 00000000..1845c99a --- /dev/null +++ b/solutions/cncf-generated/cert-manager/cert-manager-920-add-dns01-provider-for-dnsimple.json @@ -0,0 +1,54 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:05.433Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cert-manager: Add DNS01 provider for DNSimple", + "description": "Signed-off-by: Marc Sensenich \n\n**What this PR does / why we need it**:\n\nAdds DNSimple as a DNS01 Provider\n\n**Which issue this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close that issue when PR gets merged)*: fixes #\n\nCloses #472 \nReplaces #483 \n\n**Special notes for your reviewer**:\n\n**Release note**:\n\n```release-note\nAdd DNSimple as a DNS01 Provider\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "**What this PR does / why we need it**:\nAdds a DNS issuer for DNSimple\n\n**Which issue this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close that issue when PR gets merged)*: fixes #472 \n\n**Special notes for your reviewer**:\n\n**Release note**:\n\n```release-note\nAdd DNSimple as a DNS Issuer\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + 
"\r\n\r\n**What this PR does / why we need it**:\r\nAdds a DNS issuer for DNSimple\r\n\r\n**Which issue this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close that issue when PR gets merged)*: fixes #472 \r\n\r\n**Special notes for your reviewer**:\r\n\r\n**Release note**:\r\n", + "@kragniz @munnerz Anything additional I can do from my end to ensure the success of this PR? I missed today's Zoom unfortunately. \nI'm currently on holiday so won't be able to take a look until next week - thanks for putting this together!\r\n\r\n/ok-to-test\n/retest\n@munnerz @kragniz I've been hitting" + ] + } + }, + "metadata": { + "tags": [ + "cert-manager", + "graduated", + "security", + "release-note", + "needs-rebase", + "area-api", + "kind-documentation", + "size-xxl", + "area-acme", + "dco-signoff--yes", + "area-acme-dns01" + ], + "category": "security", + "cncfProjects": [ + "cert-manager" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cert-manager/cert-manager/pull/920", + "sourceRepo": "cert-manager/cert-manager", + "reactions": 8, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:44:05.433Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/chaos-mesh/chaos-mesh-1109-remove-privileged-and-add-some-capabilities.json b/solutions/cncf-generated/chaos-mesh/chaos-mesh-1109-remove-privileged-and-add-some-capabilities.json new file mode 100644 index 00000000..d86ff36c --- /dev/null +++ b/solutions/cncf-generated/chaos-mesh/chaos-mesh-1109-remove-privileged-and-add-some-capabilities.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:14.347Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "chaos-mesh: Remove privileged and add some capabilities", + "description": "### What problem does this PR solve?\n\nFix #1101 \n\n### 
What is changed and how does it work?\n\nChaos Daemon now works with `privileged: false`. In replace, it creates a `fuse` device and add it to cgroup device list manually.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "cherry-pick #1109 to release-1.0\n\n---\n\n### What problem does this PR solve?\n\nFix #1101 \n\n### What is changed and how does it work?\n\nChaos Daemon now works with `privileged: false`. In replace, it creates a `fuse` device and add it to cgroup device list manually.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "chaos-mesh", + "incubating", + "orchestration", + "status-can-merge" + ], + "category": "troubleshooting", + "cncfProjects": [ + "chaos-mesh" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/chaos-mesh/chaos-mesh/pull/1109", + "sourceRepo": "chaos-mesh/chaos-mesh", + "reactions": 3, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:46:14.347Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/chaos-mesh/chaos-mesh-1256-split-pkg-utils-into-multiple-different-pkg-or-file.json b/solutions/cncf-generated/chaos-mesh/chaos-mesh-1256-split-pkg-utils-into-multiple-different-pkg-or-file.json new file mode 100644 index 00000000..5c175d88 --- /dev/null +++ b/solutions/cncf-generated/chaos-mesh/chaos-mesh-1256-split-pkg-utils-into-multiple-different-pkg-or-file.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:15.326Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "chaos-mesh: split pkg/utils into multiple different pkg or file", + "description": "### What problem does this PR solve?\n\nFix #1254. 
Collecting multiple unrelated functions into one pkg is a bad idea, because the more it imports, the less it can be imported in other pkgs.\n\nDuring the splitting, I followed these rules:\n\n1. If the function is only used by one pkg, then it will be moved into the `utils.go` in that pkg.\n\n2. If the function is used by multiple pkgs, then it will be moved into a standalone pkg to provide the utils.\n\nIn #1206, I have faced a lot of cycle importing problems caused by other files in utils 😢 .", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/run-e2e-tests", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "| [Impacted Files](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1256?src=pr&el=tree) | Coverage Δ | |\n|---|---|---|\n| [api/v1alpha1/common\\_types.go](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1256/diff?src=pr&el=tree#diff-YXBpL3YxYWxwaGExL2NvbW1vbl90eXBlcy5nbw==) | `0.00% <0.00%> (ø)` | |\n| [api/v1alpha1/common\\_webhook.go](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1256/diff?src=pr&el=tree#diff-YXBpL3YxYWxwaGExL2NvbW1vbl93ZWJob29rLmdv) | `100.00% <ø> (ø)` | |\n| [api/v1alpha1/dnschaos\\_type.go](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1256/diff?src=pr&el=tree#diff-YXBpL3YxYWxwaGExL2Ruc2NoYW9zX3R5cGUuZ28=) | `0.00% <0.00%> (ø)` | |\n| [api/v1alpha1/dnschaos\\_webhook.go](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1256/diff?src=pr&el=tree#diff-YXBpL3YxYWxwaGExL2Ruc2NoYW9zX3dlYmhvb2suZ28=) | `0.00% <0.00%> (ø)` | |\n| [api/v1alpha1/httpchaos\\_types.go](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1256/diff?src=pr&el=tree#diff-YXBpL3YxYWxwaGExL2h0dHBjaGFvc190eXBlcy5nbw==) | `0.00% <0.00%> (ø)` | |\n| [api/v1alpha1/iochaos\\_types.go](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1256/diff?src=pr&el=tree#diff-YXBpL3YxYWxwaGExL2lvY2hhb3NfdHlwZXMuZ28=) | `0.00% <ø> (-40.00%)` | :arrow_down: |\n| 
[api/v1alpha1/jvmchaos\\_webhook.go](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1256/diff?src=pr&el=tree#diff-YXBpL3YxYWxwaGExL2p2bWNoYW9zX3dlYmhvb2suZ28=) | `0.00% <0.00%> (ø)` | |\n| [api/v1alpha1/kernelchaos\\_types.go](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1256/diff?src=pr&el=tree#diff-YXBpL3YxYWxwaGExL2tlcm5lbGNoYW9zX3R5cGVzLmdv) | `0.00% <ø> (-20.00%)` | :arrow_down: |\n| [api/v1alpha1/kernelchaos\\_webhook.go](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1256/diff?src=pr&el=tree#diff-YXBpL3YxYWxwaGExL2tlcm5lbGNoYW9zX3dlYmhvb2suZ28=) | `100.00% <ø> (+14.81%)` | :arrow_up: |\n| [api/v1alpha1/kinds.go](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1256/diff?src=pr&el=tree#diff-YXBpL3YxYWxwaGExL2tpbmRzLmdv) | `27.27% <ø> (+0.60%)` | :arrow_up: |\n| ... and [129 more](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1256/diff?src=pr&el=tree-more) | |\n\n------\n\n[Continue to review full report at Codecov](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1256?src=pr&el=continue).\n> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)\n> `Δ = absolute (impact)`, `ø = not affected`, `? = missing data`\n> Powered by [Codecov](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1256?src=pr&el=footer). Last update [643db9f...5f30e41](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1256?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments)." 
+ ] + } + }, + "metadata": { + "tags": [ + "chaos-mesh", + "incubating", + "orchestration", + "status-can-merge" + ], + "category": "troubleshooting", + "cncfProjects": [ + "chaos-mesh" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/chaos-mesh/chaos-mesh/pull/1256", + "sourceRepo": "chaos-mesh/chaos-mesh", + "reactions": 3, + "comments": 31 + }, + "security": { + "scannedAt": "2026-02-27T17:46:15.326Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/chaos-mesh/chaos-mesh-1277-feat-make-selector-support-expression-selectors.json b/solutions/cncf-generated/chaos-mesh/chaos-mesh-1277-feat-make-selector-support-expression-selectors.json new file mode 100644 index 00000000..20ee5da4 --- /dev/null +++ b/solutions/cncf-generated/chaos-mesh/chaos-mesh-1277-feat-make-selector-support-expression-selectors.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:17.808Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "chaos-mesh: feat: make selector support expression selectors.", + "description": "### What problem does this PR solve?\n\nFixed #1266 \n\n### What is changed and how does it work?\nAdd expressionSelectors field in selector spec and use metav1.LabelSelectors to do expression match operations.\n\n### Checklist\n\nTests\n\n- [ ] Unit test\n- [ ] E2E test\n- [ ] Manual test (add detailed scripts or steps below)\n- [ ] No code\n\nSide effects\n\n- [ ] Breaking backward compatibility\n\nRelated changes\n\n- [ ] Need to update the documentation\n\n### Does this PR introduce a user-facing change?\n\n```release-note\nNONE\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Hi @cwen0 @WangXiangUSTC @YangKeao @Gallardot @Colstuwjx , how about making a broken change for the label selector?\n\nThe `LabelSelector` in 
Kubernetes is mostly like:\n\n```go\n// A label selector is a label query over a set of resources. The result of matchLabels and\n// matchExpressions are ANDed. An empty label selector matches all objects. A null\n// label selector matches no objects.\ntype LabelSelector struct {\n\t// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\n\t// map is equivalent to an element of matchExpressions, whose key field is \"key\", the\n\t// operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.\n\t// +optional\n\tMatchLabels map[string]string `json:\"matchLabels,omitempty\" protobuf:\"bytes,1,rep,name=matchLabels\"`\n\t// matchExpressions is a list of label selector requirements. The requirements are ANDed.\n\t// +optional\n\tMatchExpressions []LabelSelectorRequirement `json:\"matchExpressions,omitempty\" protobuf:\"bytes,2,rep,name=matchExpressions\"`\n}\n```\n\nIn Choas Mesh repo, after we introduce the expressions selector, there are two fields in the CRD.\nI think it's better to keep the same as kubernetes.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "It's still WIP, I need also add some testcases for testing it. 
But I think it will be ready for review very soon.\r\n\n# [Codecov](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1277?src=pr&el=h1) Report\n> Merging [#1277](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1277?src=pr&el=desc) (13e7360) into [master](https://codecov.io/gh/chaos-mesh/chaos-mesh/commit/7e9ff3f2fc5cf08fe6bd94df6929cdf48eb08504?el=desc) (7e9ff3f) will **decrease** coverage by `2.35%`.\n> The diff coverage is `52.68%`.\n\n[![Impacted file tree graph](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1277/graphs/tree.svg?width=650&height=150&src=pr&token=a3XIzuy5Dk)](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1277?src=pr&el=tree)", + "| [Impacted Files](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1277?src=pr&el=tree) | Coverage Δ | |\n|---|---|---|\n| [api/v1alpha1/common\\_types.go](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1277/diff?src=pr&el=tree#diff-YXBpL3YxYWxwaGExL2NvbW1vbl90eXBlcy5nbw==) | `0.00% <0.00%> (ø)` | |\n| [api/v1alpha1/common\\_webhook.go](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1277/diff?src=pr&el=tree#diff-YXBpL3YxYWxwaGExL2NvbW1vbl93ZWJob29rLmdv) | `100.00% <ø> (ø)` | |\n| [api/v1alpha1/dnschaos\\_type.go](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1277/diff?src=pr&el=tree#diff-YXBpL3YxYWxwaGExL2Ruc2NoYW9zX3R5cGUuZ28=) | `0.00% <0.00%> (ø)` | |\n| [api/v1alpha1/dnschaos\\_webhook.go](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1277/diff?src=pr&el=tree#diff-YXBpL3YxYWxwaGExL2Ruc2NoYW9zX3dlYmhvb2suZ28=) | `0.00% <0.00%> (ø)` | |\n| [api/v1alpha1/httpchaos\\_types.go](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1277/diff?src=pr&el=tree#diff-YXBpL3YxYWxwaGExL2h0dHBjaGFvc190eXBlcy5nbw==) | `0.00% <0.00%> (ø)` | |\n| [api/v1alpha1/iochaos\\_types.go](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1277/diff?src=pr&el=tree#diff-YXBpL3YxYWxwaGExL2lvY2hhb3NfdHlwZXMuZ28=) | `0.00% <ø> (-40.00%)` | :arrow_down: |\n| 
[api/v1alpha1/jvmchaos\\_webhook.go](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1277/diff?src=pr&el=tree#diff-YXBpL3YxYWxwaGExL2p2bWNoYW9zX3dlYmhvb2suZ28=) | `0.00% <0.00%> (ø)` | |\n| [api/v1alpha1/kernelchaos\\_types.go](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1277/diff?src=pr&el=tree#diff-YXBpL3YxYWxwaGExL2tlcm5lbGNoYW9zX3R5cGVzLmdv) | `0.00% <ø> (-20.00%)` | :arrow_down: |\n| [api/v1alpha1/kernelchaos\\_webhook.go](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1277/diff?src=pr&el=tree#diff-YXBpL3YxYWxwaGExL2tlcm5lbGNoYW9zX3dlYmhvb2suZ28=) | `100.00% <ø> (+14.81%)` | :arrow_up: |\n| [api/v1alpha1/kinds.go](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1277/diff?src=pr&el=tree#diff-YXBpL3YxYWxwaGExL2tpbmRzLmdv) | `27.27% <ø> (+0.60%)` | :arrow_up: |\n| ... and [124 more](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1277/diff?src=pr&el=tree-more) | |\n\n------\n\n[Continue to review full report at Codecov](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1277?src=pr&el=continue).\n> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)\n> `Δ = absolute (impact)`, `ø = not affected`, `? = missing data`\n> Powered by [Codecov](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1277?src=pr&el=footer). Last update [05ce8b6...13e7360](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1277?src=pr&el=lastupdated). 
Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).\n\nCI would throw error if I keep using `metav1.LabelSelectorRequirement`:", + "So I choose workaround by:" + ] + } + }, + "metadata": { + "tags": [ + "chaos-mesh", + "incubating", + "orchestration", + "type-enhancement", + "component-operator", + "status-can-merge" + ], + "category": "troubleshooting", + "cncfProjects": [ + "chaos-mesh" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/chaos-mesh/chaos-mesh/pull/1277", + "sourceRepo": "chaos-mesh/chaos-mesh", + "reactions": 3, + "comments": 16 + }, + "security": { + "scannedAt": "2026-02-27T17:46:17.808Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/chaos-mesh/chaos-mesh-1285-refactor-dashboard-homepage-and-some-visible-changes.json b/solutions/cncf-generated/chaos-mesh/chaos-mesh-1285-refactor-dashboard-homepage-and-some-visible-changes.json new file mode 100644 index 00000000..41dffc34 --- /dev/null +++ b/solutions/cncf-generated/chaos-mesh/chaos-mesh-1285-refactor-dashboard-homepage-and-some-visible-changes.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:08.005Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "chaos-mesh: refactor: dashboard homepage and some visible changes", + "description": "### What problem does this PR solve?\n\nThis PR introduces several changes (from #1283) that will be effected in `v1.1.0`:\n\n- [x] (refactor) Make the homepage more like a dashboard\n- [x] (feat) Tourist Guide on home page\n- [x] (feat) Predefined experiments (Can be store in the IndexedDB)\n- [x] (chore) Security Mode support in UI\n- [x] (feat) Download experiment YAML definition (Resolve #1115)\n- [x] (feat) Embed Swagger API\n- [x] (chore) Delete archive experiments by UI interface #1272\n\nAnd also, this PR has 
some minor bug fixes:\n\n- Can't view `DNSChaos` archives\n- Repeatedly request `chaos-available-namespaces`\n- Some bug fixes\n- Styles optimized\n\nSome extra fixes were open below. When it's merged in master. Some functions will work as normal.\n\n- #1261\n- #1323\n\n### What is changed and how does it work?\n\nIn addition to the above changes, the dashboard UI has a layout change to adapt the recent features.\n\n### Checklist\n\nTests\n\n- [ ] Unit test\n- [ ] E2E test\n- [x] Manual test (add detailed ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "# [Codecov](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1285?src=pr&el=h1) Report\n> Merging [#1285](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1285?src=pr&el=desc) (7e8048c) into [master](https://codecov.io/gh/chaos-mesh/chaos-mesh/commit/7e9ff3f2fc5cf08fe6bd94df6929cdf48eb08504?el=desc) (7e9ff3f) will **decrease** coverage by `2.57%`.\n> The diff coverage is `52.82%`.\n\n[![Impacted file tree graph](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1285/graphs/tree.svg?width=650&height=150&src=pr&token=a3XIzuy5Dk)](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1285?src=pr&el=tree)\n\n```diff\n@@ Coverage Diff @@\n## master #1285 +/- ##\n==========================================\n- Coverage 55.78% 53.21% -2.58% \n==========================================\n Files 68 86 +18 \n Lines 4383 5418 +1035 \n==========================================\n+ Hits 2445 2883 +438 \n- Misses 1768 2264 +496 \n- Partials 170 271 +101 \n```\n\n| [Impacted Files](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1285?src=pr&el=tree) | Coverage Δ | |\n|---|---|---|\n| [api/v1alpha1/common\\_types.go](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1285/diff?src=pr&el=tree#diff-YXBpL3YxYWxwaGExL2NvbW1vbl90eXBlcy5nbw==) | `0.00% <0.00%> (ø)` | |\n| [api/v1alpha1/common\\_webhook.go](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/1285/diff?src=pr&el=tree#dif", + "steps": [ + "Review the 
issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "chaos-mesh", + "incubating", + "orchestration", + "status-can-merge" + ], + "category": "troubleshooting", + "cncfProjects": [ + "chaos-mesh" + ], + "targetResourceKinds": [ + "Namespace" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/chaos-mesh/chaos-mesh/pull/1285", + "sourceRepo": "chaos-mesh/chaos-mesh", + "reactions": 14, + "comments": 6 + }, + "security": { + "scannedAt": "2026-02-27T17:46:08.005Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/chaos-mesh/chaos-mesh-1504-use-tls-mutual-authentication-for-chaos-daemon.json b/solutions/cncf-generated/chaos-mesh/chaos-mesh-1504-use-tls-mutual-authentication-for-chaos-daemon.json new file mode 100644 index 00000000..dee8352d --- /dev/null +++ b/solutions/cncf-generated/chaos-mesh/chaos-mesh-1504-use-tls-mutual-authentication-for-chaos-daemon.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:16.830Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "chaos-mesh: use tls mutual authentication for chaos-daemon", + "description": "Signed-off-by: Yang Keao \n\n### What problem does this PR solve?\n\nFix #1431\n\n### What is changed and how does it work?\n\nAdd TLS authentication for grpc connection to `chaos-daemon`.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Signed-off-by: Yang Keao \n\nManually modifying the import groups are very frustrating, especially when some tools add import for you. Like in #1504 \n\n### What problem does this PR solve?\n\nI have tried to fork `goimports` and modify a little to forcely group imports into four groups:\n\n1. standard libraries\n2. third-party libraries\n3. third-party libraries contain `k8s.io`\n4. 
local libraries\n\nBut I haven't found a good way to `go get` binary from a fork repo :(", + "steps": [ + "standard libraries", + "third-party libraries", + "third-party libraries contain `k8s.io`", + "local libraries" + ] + } + }, + "metadata": { + "tags": [ + "chaos-mesh", + "incubating", + "orchestration", + "status-can-merge" + ], + "category": "troubleshooting", + "cncfProjects": [ + "chaos-mesh" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/chaos-mesh/chaos-mesh/pull/1504", + "sourceRepo": "chaos-mesh/chaos-mesh", + "reactions": 3, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:46:16.830Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/chaos-mesh/chaos-mesh-2291-feat-enable-chaos-controller-manager-leader-election.json b/solutions/cncf-generated/chaos-mesh/chaos-mesh-2291-feat-enable-chaos-controller-manager-leader-election.json new file mode 100644 index 00000000..33b993e2 --- /dev/null +++ b/solutions/cncf-generated/chaos-mesh/chaos-mesh-2291-feat-enable-chaos-controller-manager-leader-election.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:10.850Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "chaos-mesh: feat: enable chaos-controller-manager leader election", + "description": "### What problem does this PR solve?\n\nIssue Number: close #1516 \n\nProblem Summary: enable chaos-controller-manager leader election\n\n### What is changed and how it works?\n\nWhat's Changed:\n\n### Related changes\n\n* no\n\n### Checklist\n\nTests\n\n- [ ] Unit test\n- [ ] E2E test\n- [x] Manual test (add detailed scripts or steps below)\n- [ ] No code\n\nSide effects\n\n- [ ] Breaking backward compatibility\n\n### Release note \n\n```release-note\nPlease add a release note.\nIf you don't think this PR needs a release note then 
fill it with None.\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "[REVIEW NOTIFICATION]\n\nThis pull request has been approved by:\n\n- STRRL\n- YangKeao\n\nTo complete the [pull request process](https://book.prow.tidb.io/#/en/workflows/pr), please ask the reviewers in the [list](https://prow.tidb.io/tichi/repos/chaos-mesh/chaos-mesh/pulls/2291/owners) to review by filling `/cc @reviewer` in the comment.\nAfter your PR has acquired the required number of LGTMs, you can assign this pull request to the committer in the [list](https://prow.tidb.io/tichi/repos/chaos-mesh/chaos-mesh/pulls/2291/owners) by filling `/assign @committer` in the comment to help you merge this pull request.\n\nThe full list of commands accepted by this bot can be found [here](https://prow.tidb.io/command-help?repo=chaos-mesh%2Fchaos-mesh).\n\n
\n\nReviewer can indicate their review by submitting an approval review.\nReviewer can cancel approval by submitting a request changes review.\n
", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "chaos-mesh", + "incubating", + "orchestration", + "status-can-merge", + "status-lgt2", + "size-m" + ], + "category": "troubleshooting", + "cncfProjects": [ + "chaos-mesh" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/chaos-mesh/chaos-mesh/pull/2291", + "sourceRepo": "chaos-mesh/chaos-mesh", + "reactions": 4, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:46:10.850Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/chaos-mesh/chaos-mesh-2366-jvmchaos-refine-with-byteman.json b/solutions/cncf-generated/chaos-mesh/chaos-mesh-2366-jvmchaos-refine-with-byteman.json new file mode 100644 index 00000000..2d8f1caa --- /dev/null +++ b/solutions/cncf-generated/chaos-mesh/chaos-mesh-2366-jvmchaos-refine-with-byteman.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:11.753Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "chaos-mesh: JVMChaos: refine with byteman", + "description": "### What problem does this PR solve?\n\nIssue Number: close #2365 \n\nProblem Summary: Now JVMChaos implement with chaosblade-exec-jvm, which lack\ndetailed documentation, and require sidecar, all together make it difficult for users to use.\n\n### What is changed and how it works?\n\nWhat's Changed:\n1. refactor the structure of JVMChaos\n2. install byteman in chaos-daemon\n\n### Related changes\n\n* PR to update `chaos-mesh/website`/`chaos-mesh/website-zh`: Yes. 
issue here: https://github.com/chaos-mesh/website/issues/162\n* Need to update Chaos Dashboard component, related issue:\n* Need to cheery-pick to the release branch\n\n### Checklist\n\nTests\n\n- [x] Unit test\n- [x] E2E test\n- [x] Manual test (add detailed scripts or steps below)\n- [ ] No code\n\nSide effects\n\n- [x] Breaking backward compatibility\n\n### Release note \n\n```release-note\nPlease add a release note.\nIf you don't think this PR needs a release note then fill it with None.\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "[REVIEW NOTIFICATION]\n\nThis pull request has been approved by:\n\n- Andrewmatilde\n- STRRL\n\nTo complete the [pull request process](https://book.prow.tidb.io/#/en/workflows/pr), please ask the reviewers in the [list](https://prow.tidb.io/tichi/repos/chaos-mesh/chaos-mesh/pulls/2366/owners) to review by filling `/cc @reviewer` in the comment.\nAfter your PR has acquired the required number of LGTMs, you can assign this pull request to the committer in the [list](https://prow.tidb.io/tichi/repos/chaos-mesh/chaos-mesh/pulls/2366/owners) by filling `/assign @committer` in the comment to help you merge this pull request.\n\nThe full list of commands accepted by this bot can be found [here](https://prow.tidb.io/command-help?repo=chaos-mesh%2Fchaos-mesh).\n\n
\n\nReviewer can indicate their review by submitting an approval review.\nReviewer can cancel approval by submitting a request changes review.\n
", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "chaos-mesh", + "incubating", + "orchestration", + "status-can-merge", + "status-lgt2", + "chaos-jvm", + "size-xxl" + ], + "category": "troubleshooting", + "cncfProjects": [ + "chaos-mesh" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/chaos-mesh/chaos-mesh/pull/2366", + "sourceRepo": "chaos-mesh/chaos-mesh", + "reactions": 4, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:46:11.753Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/chaos-mesh/chaos-mesh-2912-refactor-logging-in-pkg-metrics-and-moves-log-field-to-struct-fi.json b/solutions/cncf-generated/chaos-mesh/chaos-mesh-2912-refactor-logging-in-pkg-metrics-and-moves-log-field-to-struct-fi.json new file mode 100644 index 00000000..9eb19987 --- /dev/null +++ b/solutions/cncf-generated/chaos-mesh/chaos-mesh-2912-refactor-logging-in-pkg-metrics-and-moves-log-field-to-struct-fi.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:12.743Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "chaos-mesh: Refactor logging in `pkg/metrics` and moves `log` field to struct field", + "description": "Signed-off-by: afzal442 \n\nCloses: #2883 \n\n### What problem does this PR solve?\n\n### What's changed and how it works?\n\n### Related changes\n\n- [ ] Need to update `chaos-mesh/website`\n- [ ] Need to update `Dashboard UI`\n- Need to **cheery-pick to release branches**\n - [ ] release-2.1\n - [ ] release-2.0\n\n### Checklist\n\nTests\n\n- [ ] Unit test\n- [ ] E2E test\n- [ ] No code\n- [ ] Manual test (add steps below)\n\nSide effects\n\n- [ ] Breaking backward compatibility\n\n### Release note \n\n```text\nPlease add a release 
note.\n\nYou can safely ignore this section if you don't think this PR needs a release note.\n```\n\n### DCO\n\nIf you find the DCO check fails, please run commands like below (Depends on the actual situations. For example, if the failed commit isn't the most recent) to fix it:\n\n```shell\ngit commit --amend --signoff\ngit push --force\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "[REVIEW NOTIFICATION]\n\nThis pull request has been approved by:\n\n- STRRL\n- WangXiangUSTC\n\nTo complete the [pull request process](https://book.prow.tidb.io/#/en/workflows/pr), please ask the reviewers in the [list](https://prow.tidb.io/tichi/repos/chaos-mesh/chaos-mesh/pulls/2912/owners) to review by filling `/cc @reviewer` in the comment.\nAfter your PR has acquired the required number of LGTMs, you can assign this pull request to the committer in the [list](https://prow.tidb.io/tichi/repos/chaos-mesh/chaos-mesh/pulls/2912/owners) by filling `/assign @committer` in the comment to help you merge this pull request.\n\nThe full list of commands accepted by this bot can be found [here](https://prow.tidb.io/command-help?repo=chaos-mesh%2Fchaos-mesh).\n\n
\n\nReviewer can indicate their review by submitting an approval review.\nReviewer can cancel approval by submitting a request changes review.\n
", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "### DCO\r\n\r\nIf you find the DCO check fails, please run commands like below (Depends on the actual situations. For example, if the failed commit isn't the most recent) to fix it:", + "[REVIEW NOTIFICATION]\n\nThis pull request has been approved by:\n\n- STRRL\n- WangXiangUSTC\n\n\n\n\nTo complete the [pull request process](https://book.prow.tidb.io/#/en/workflows/pr), please ask the reviewers in the [list](https://prow.tidb.io/tichi/repos/chaos-mesh/chaos-mesh/pulls/2912/owners) to review by filling `/cc @reviewer` in the comment.\nAfter your PR has acquired the required number of LGTMs, you can assign this pull request to the committer in the [list](https://prow.tidb.io/tichi/repos/chaos-mesh/chaos-mesh/pulls/2912/owners) by filling `/assign @committer` in the comment to help you merge this pull request.\n\nThe full list of commands accepted by this bot can be found [here](https://prow.tidb.io/command-help?repo=chaos-mesh%2Fchaos-mesh).\n\n
\n\nReviewer can indicate their review by submitting an approval review.\nReviewer can cancel approval by submitting a request changes review.\n
\n\n\nWelcome @afzal442!
\nIt looks like this is your first PR to chaos-mesh/chaos-mesh 🎉.\n\nI'm the bot to help you request reviewers, add labels and more, See [available commands](https://prow.tidb.io/command-help).\n\nWe want to make sure your contribution gets all the attention it needs!\n\n\n
Thank you, and welcome to chaos-mesh/chaos-mesh. :smiley:\n/cc @STRRL \n/cc @WangXiangUSTC @g1eny0ung \n# [Codecov](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/2912?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=chaos-mesh) Report\n> Merging [#2912](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/2912?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=chaos-mesh) (a73ef6c) into [master](https://codecov.io/gh/chaos-mesh/chaos-mesh/commit/d21564823806e8c019cad91be693eeea40dd53df?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=chaos-mesh) (d215648) will **decrease** coverage by `0.07%`.\n> The diff coverage is `n/a`.\n\n[![Impacted file tree graph](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/2912/graphs/tree.svg?width=650&height=150&src=pr&token=a3XIzuy5Dk&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=chaos-mesh)](https://codecov.io/gh/chaos-mesh/chaos-mesh/pull/2912?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=chaos-mesh)" + ] + } + }, + "metadata": { + "tags": [ + "chaos-mesh", + "incubating", + "orchestration", + "status-can-merge", + "status-lgt2", + "contribution", + "first-time-contributor", + "size-m", + "needs-cherry-pick-release-2-1" + ], + "category": "troubleshooting", + "cncfProjects": [ + "chaos-mesh" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/chaos-mesh/chaos-mesh/pull/2912", + "sourceRepo": "chaos-mesh/chaos-mesh", + "reactions": 3, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:46:12.743Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git 
a/solutions/cncf-generated/chaos-mesh/chaos-mesh-3066-helm-chart-support-latest-api-version-of-dashboard-ingress.json b/solutions/cncf-generated/chaos-mesh/chaos-mesh-3066-helm-chart-support-latest-api-version-of-dashboard-ingress.json new file mode 100644 index 00000000..4c224966 --- /dev/null +++ b/solutions/cncf-generated/chaos-mesh/chaos-mesh-3066-helm-chart-support-latest-api-version-of-dashboard-ingress.json @@ -0,0 +1,54 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:09.819Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "chaos-mesh: Helm chart: Support latest api version of dashboard ingress", + "description": "Signed-off-by: Bobae Kim tree9295@gmail.com\n\n### What problem does this PR solve?\n\nClose #2998 \n\n### What's changed and how it works?\n\n- Added values to the `values.yaml` helm file for reference by `templates/ingress.yaml`.\n - `dashboard.ingress.apiVersionOverrides` field: apiVersion of ingress. This is used in `_helpers.tpl` to define apiVersion of ingress. \n - `dashboard.ingress.ingressClassName` field: For define ingress controller \n - `dashboard.ingress.paths` field: moved from `dashboard.ingress.hosts.paths`\n\n- `chaos-dashboard.ingress.apiVersion` (`_helpers.tpl`) : added helper so that ingress apiVersion is set according to k8s version.\n\n### Related changes\n\n- [ ] Need to update `chaos-mesh/website`\n- [ ] Need to update `Dashboard UI`\n- Need to **cheery-pick to release branches**\n - [ ] release-2.1\n - [ ] release-2.0\n\n### Checklist\n\nTests\n\n- [ ] Unit test\n- [ ] E2E test\n- [ ] No code\n- [x] Manual test (add steps below)\n - I tested `helm template`, `helm install`\n\nSide effe", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "### What problem does this PR solve?\n\nIt adds support for ingressClass objects in the Ingress Template for Chaos Mesh Helm chart. 
\n\n### What's changed and how it works?\n\ningressClassName should be specified in values.yaml instead of kubernetes.io/ingress.class: nginx annotation for Kubernetes >= 1.18\n\n### Related changes\n\n- [ ] Need to update `chaos-mesh/website`\n- [ ] Need to update `Dashboard UI`\n- Need to **cheery-pick to release branches**\n - [ ] release-2.1\n - [ ] release-2.0\n\n### Checklist\n\nTests\n\n- [ ] Unit test\n- [ ] E2E test\n- [X] No code\n- [ ] Manual test (add steps below)\n\nSide effects\n\n- [ ] Breaking backward compatibility\n\n### Release note \n\n```text\n- Add support for ingressClass objects in Chaos Mesh dashboard ingress\n```\n\n### DCO\n\nIf you find the DCO check fails, please run commands like below (Depends on the actual situations. For example, if the failed commit isn't the most recent) to fix it:\n\n```shell\ngit commit --amend --signoff\ngit push --force\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "### DCO\r\n\r\nIf you find the DCO check fails, please run commands like below (Depends on the actual situations. For example, if the failed commit isn't the most recent) to fix it:", + "### What problem does this PR solve?\r\n\r\nIt adds support for ingressClass objects in the Ingress Template for Chaos Mesh Helm chart. 
\r\n\r\n### What's changed and how it works?\r\n\r\ningressClassName should be specified in values.yaml instead of kubernetes.io/ingress.class: nginx annotation for Kubernetes >= 1.18\r\n\r\n### Related changes\r\n\r\n- [ ] Need to update `chaos-mesh/website`\r\n- [ ] Need to update `Dashboard UI`\r\n- Need to **cheery-pick to release branches**\r\n - [ ] release-2.1\r\n - [ ] release-2.0\r\n\r\n### Checklist\r\n\r\nTests\r\n\r\n\r\n\r\n- [ ] Unit test\r\n- [ ] E2E test\r\n- [X] No code\r\n- [ ] Manual test (add steps below)\r\n\r\n\r\n\r\nSide effects\r\n\r\n- [ ] Breaking backward compatibility\r\n\r\n### Release note ", + "### DCO\r\n\r\nIf you find the DCO check fails, please run commands like below (Depends on the actual situations. For example, if the failed commit isn't the most recent) to fix it:" + ] + } + }, + "metadata": { + "tags": [ + "chaos-mesh", + "incubating", + "orchestration", + "status-can-merge", + "status-lgt2", + "contribution", + "first-time-contributor", + "size-m" + ], + "category": "troubleshooting", + "cncfProjects": [ + "chaos-mesh" + ], + "targetResourceKinds": [ + "Ingress" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/chaos-mesh/chaos-mesh/pull/3066", + "sourceRepo": "chaos-mesh/chaos-mesh", + "reactions": 4, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:46:09.819Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/chaos-mesh/chaos-mesh-3148-ci-new-check-for-changelog.json b/solutions/cncf-generated/chaos-mesh/chaos-mesh-3148-ci-new-check-for-changelog.json new file mode 100644 index 00000000..1b9fa414 --- /dev/null +++ b/solutions/cncf-generated/chaos-mesh/chaos-mesh-3148-ci-new-check-for-changelog.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:20.659Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "chaos-mesh: 
ci: new check for changelog", + "description": "Signed-off-by: STRRL \n\n### What problem does this PR solve?\n\nclose https://github.com/chaos-mesh/chaos-mesh/issues/3134\n\n### What's changed and how it works?\n\n- new label \"no-need-update-changelog\"\n- new GitHub CI Checks: makesure that one of them exists:\n \t- changes of CHANGELOG.md\n \t- label \"no-need-update-changelog\" on this PR\n- update PR template for the checklist with CHANGELOG\n\n### Related changes\n\n- [ ] Need to update `chaos-mesh/website`\n- [ ] Need to update `Dashboard UI`\n- Need to **cheery-pick to release branches**\n - [x] release-2.1\n - [x] release-2.0\n\n### Checklist\n\nTests\n\n- [ ] Unit test\n- [ ] E2E test\n- [ ] No code\n- [ ] Manual test (add steps below)\n\nSide effects\n\n- [ ] Breaking backward compatibility\n\n### Release note \n\n```text\nPlease add a release note.\n\nYou can safely ignore this section if you don't think this PR needs a release note.\n```\n\n### DCO\n\nIf you find the DCO check fails, please run commands like below (Depends on the actual situations. Fo", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "[REVIEW NOTIFICATION]\n\nThis pull request has been approved by:\n\n- Hexilee\n- iguoyr\n\nTo complete the [pull request process](https://book.prow.tidb.io/#/en/workflows/pr), please ask the reviewers in the [list](https://prow.tidb.io/tichi/repos/chaos-mesh/chaos-mesh/pulls/3148/owners) to review by filling `/cc @reviewer` in the comment.\nAfter your PR has acquired the required number of LGTMs, you can assign this pull request to the committer in the [list](https://prow.tidb.io/tichi/repos/chaos-mesh/chaos-mesh/pulls/3148/owners) by filling `/assign @committer` in the comment to help you merge this pull request.\n\nThe full list of commands accepted by this bot can be found [here](https://prow.tidb.io/command-help?repo=chaos-mesh%2Fchaos-mesh).\n\n
\n\nReviewer can indicate their review by submitting an approval review.\nReviewer can cancel approval by submitting a request changes review.\n
", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "### DCO\r\n\r\nIf you find the DCO check fails, please run commands like below (Depends on the actual situations. For example, if the failed commit isn't the most recent) to fix it:" + ] + } + }, + "metadata": { + "tags": [ + "chaos-mesh", + "incubating", + "orchestration", + "status-can-merge", + "status-lgt2", + "size-m", + "needs-cherry-pick-release-2-0", + "needs-cherry-pick-release-2-1" + ], + "category": "troubleshooting", + "cncfProjects": [ + "chaos-mesh" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/chaos-mesh/chaos-mesh/pull/3148", + "sourceRepo": "chaos-mesh/chaos-mesh", + "reactions": 3, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:46:20.659Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/chaos-mesh/chaos-mesh-3476-chore-configure-qps-and-burst-for-chaos-dashboard.json b/solutions/cncf-generated/chaos-mesh/chaos-mesh-3476-chore-configure-qps-and-burst-for-chaos-dashboard.json new file mode 100644 index 00000000..1e800746 --- /dev/null +++ b/solutions/cncf-generated/chaos-mesh/chaos-mesh-3476-chore-configure-qps-and-burst-for-chaos-dashboard.json @@ -0,0 +1,53 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:19.372Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "chaos-mesh: chore: configure QPS and Burst for chaos dashboard", + "description": "Signed-off-by: STRRL \n\n### What problem does this PR solve?\n\nclose #3475\n\n### What's changed and how it works?\n\n- append `QPS` and `Burst` to `ChaosDashboardConfig`\n- when initializing kubernetes client, respect these configurations.\n\n### Related changes\n\n- [ ] Need to update `chaos-mesh/website`\n- [ ] Need to update `Dashboard UI`\n- 
Need to **cheery-pick to release branches**\n - [x] release-2.3\n - [x] release-2.2\n - [x] release-2.1\n\n### Checklist\n\nCHANGELOG\n\n- [x] I have updated the `CHANGELOG.md`\n- [ ] I have labeled this PR with \"no-need-update-changelog\"\n\nTests\n\n- [ ] Unit test\n- [ ] E2E test\n- [ ] No code\n- [x] Manual test (add steps below)\n\nSide effects\n\n- [ ] Breaking backward compatibility\n\n### DCO\n\nIf you find the DCO check fails, please run commands like below (Depends on the actual situations. For example, if the failed commit isn't the most recent) to fix it:\n\n```shell\ngit commit --amend --signoff\ngit push --force\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "cherry-pick #3476 to release-2.3\nYou can switch your code base to this Pull Request by using [git-extras](https://github.com/tj/git-extras):\n```bash\n# In chaos-mesh repo:\ngit pr https://github.com/chaos-mesh/chaos-mesh/pull/3479\n```\n\nAfter apply modifications, you can push your change to this PR via:\n```bash\ngit push git@github.com:ti-srebot/chaos-mesh.git pr/3479:release-2.3-457723db198c\n```\n\n---\n\nSigned-off-by: STRRL \n\n### What problem does this PR solve?\n\nclose #3475\n\n### What's changed and how it works?\n\n- append `QPS` and `Burst` to `ChaosDashboardConfig`\n- when initializing kubernetes client, respect these configurations.\n\n### Related changes\n\n- [ ] Need to update `chaos-mesh/website`\n- [ ] Need to update `Dashboard UI`\n- Need to **cheery-pick to release branches**\n - [x] release-2.3\n - [x] release-2.2\n - [x] release-2.1\n\n### Checklist\n\nCHANGELOG\n\n- [x] I have updated the `CHANGELOG.md`\n- [ ] I have labeled this PR with \"no-need-update-changelog\"\n\nTests\n\n- [ ] Unit test\n- [ ] E2E test\n- [ ] No code\n- [x] Manual test (add steps below)\n\nSide effects\n\n- [ ] Breaking backward compatibility\n\n### DCO\n\nIf you find the DCO check fails, please run commands like below (Depends on the actual situations. 
For example, if the failed commit isn't the most recent) to fix it:\n\n```shell\ngit commit --amend --signoff\ngit push --force\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "git commit --amend --signoff\r\ngit push --force", + "# In chaos-mesh repo:\ngit pr https://github.com/chaos-mesh/chaos-mesh/pull/3479", + "git push git@github.com:ti-srebot/chaos-mesh.git pr/3479:release-2.3-457723db198c" + ] + } + }, + "metadata": { + "tags": [ + "chaos-mesh", + "incubating", + "orchestration", + "status-can-merge", + "status-lgt2", + "size-m", + "needs-cherry-pick-release-2-1", + "needs-cherry-pick-release-2-2", + "needs-cherry-pick-release-2-3" + ], + "category": "troubleshooting", + "cncfProjects": [ + "chaos-mesh" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/chaos-mesh/chaos-mesh/pull/3476", + "sourceRepo": "chaos-mesh/chaos-mesh", + "reactions": 3, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:46:19.372Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cilium/cilium-15383-add-wireguard-support.json b/solutions/cncf-generated/cilium/cilium-15383-add-wireguard-support.json new file mode 100644 index 00000000..db00a577 --- /dev/null +++ b/solutions/cncf-generated/cilium/cilium-15383-add-wireguard-support.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:16.889Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cilium: Add Wireguard support", + "description": "This PR adds a native Wireguard support. 
Currently, the feature is restricted to Kubernetes and ClusterPool (with single podCIDR per node and IP family) IPAM, and works in the direct routing mode only (the tunneling mode is going to be supported in the future).\n\nThe feature consists of two major components - `pkg/wireguard/agent` (which is run by cilium-agent) and `pkg/wireguard/operator` (run by a cilium-operator leader).\n\nAt the high level, we create a wireguard tunnel device (`cilium_wg0`) on each node, and set an IP from a dedicated subnet (in the code we call it as a wireguard subnet). Next, we generate a private key, and announce to remote nodes its public key via the CiliumNode object annotation. In addition, we use the same object to announce the wireguard tunnel IP, podCIDR. Finally, when the agent receives a remote CiliumNode object, it sets up the wireguard tunnel to the peer by using the nodeIP as wireguard endpoint address, the wireguard tunnel IP + podCIDR as allowed-ips,", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "test-net-next", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "$ ip rule show\r\n1: from all fwmark 0xe00/0xf00 lookup 201\r\n[...]\r\n$ ip route show table 201\r\ndefault dev cilium_wg0" + ] + } + }, + "metadata": { + "tags": [ + "cilium", + "graduated", + "networking", + "release-note-major", + "ready-to-merge" + ], + "category": "networking", + "cncfProjects": [ + "cilium" + ], + "targetResourceKinds": [ + "Pod", + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cilium/cilium/pull/15383", + "sourceRepo": "cilium/cilium", + "reactions": 16, + "comments": 21 + }, + "security": { + "scannedAt": "2026-02-27T17:44:16.889Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cilium/cilium-17929-k8s-daemon-implement-service-topology-aware-hints.json 
b/solutions/cncf-generated/cilium/cilium-17929-k8s-daemon-implement-service-topology-aware-hints.json new file mode 100644 index 00000000..125eaa46 --- /dev/null +++ b/solutions/cncf-generated/cilium/cilium-17929-k8s-daemon-implement-service-topology-aware-hints.json @@ -0,0 +1,53 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:32.728Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cilium: k8s,daemon: Implement service topology aware hints", + "description": "Fix https://github.com/cilium/cilium/issues/9708.\n\n```release-note\nAdd K8s Service Topology Aware Hints\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/test", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "/test\n/test\n(4.19 hitting known flake https://github.com/cilium/cilium/issues/17881)\nThe 4.19 CI hit https://github.com/cilium/cilium/issues/17881.\n/test\n\nJob 'Cilium-PR-K8s-1.22-kernel-4.9' failed and has not been observed before, so may be related to your PR:\n
Click to show.\n\n### Test Name", + "### Failure Output", + "
\n\nIf it is a flake, comment `/mlh new-flake Cilium-PR-K8s-1.22-kernel-4.9` so I can create a new GitHub issue to track it.\n\nJob 'Cilium-PR-K8s-1.21-kernel-4.19' hit: #17881 (98.33% similarity) \n\nThe 4.9 CI hit #17881.\n> Where's the code logic that handles the \"awareness\" of the topology? i.e. the code that gives preference to local endpoints over non-local?\r\n\r\n@aanm Can't link that code, but the meat is in `filterEndpoints` function introduced by this PR. The topology hints for endpoints are set by kube-controller-manager.\n/test\n\nJob 'Cilium-PR-K8s-1.22-kernel-4.9' hit: #17919 (91.17% similarity) \n\n/test" + ] + } + }, + "metadata": { + "tags": [ + "cilium", + "graduated", + "networking", + "area-daemon", + "release-note-major", + "ready-to-merge", + "backport-done-1-11" + ], + "category": "networking", + "cncfProjects": [ + "cilium" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cilium/cilium/pull/17929", + "sourceRepo": "cilium/cilium", + "reactions": 8, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:44:32.728Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cilium/cilium-18166-policy-fix-selector-identity-release-for-fqdn.json b/solutions/cncf-generated/cilium/cilium-18166-policy-fix-selector-identity-release-for-fqdn.json new file mode 100644 index 00000000..d93550d9 --- /dev/null +++ b/solutions/cncf-generated/cilium/cilium-18166-policy-fix-selector-identity-release-for-fqdn.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:31.386Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cilium: policy: Fix selector identity release for FQDN", + "description": "Alexander reports in issue #18023 that establishing a connection\nvia an FQDN policy, then modifying that FQDN policy, would 
cause\nsubsequent traffic to the FQDN to be dropped, even if the new policy\nstill allowed the same traffic via a `toFQDNs` statement.\n\nThis was caused by overzealous release of CIDR identities while\ngenerating a new policy. Although the policy calculation itself keeps\nall `SelectorCache` entries alive during the policy generation phase (see\n`cachedSelectorPolicy.setPolicy()`), after the new policy is inserted\ninto the `PolicyCache`, the `distillery` would clean up the old\npolicy. As part of that cleanup, it would call into the individual\nselector to call the `RemoveSelectors()` function.\n\nThe previous implementation of this logic unintentionally released the\nunderlying identities any time a user of a selector was released, rather\nthan only releasing the underlying identities when the number of users\nreached zero and the selector itself would be released. This meant", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "* #17982 -- workflows: Run CodeQL workflow is the workflow is edited (@pchaigno)\n * #18155 -- docs: Update the minimum required Minikube version (@pchaigno)\n * #18166 -- policy: Fix selector identity release for FQDN (@joestringer)\n\nOnce this PR is merged, you can update the PR labels via:\n```upstream-prs\n$ for pr in 17982 18155 18166; do contrib/backporting/set-labels.py $pr done 1.10; done\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "/test\nForgot to include the new policy since it was all working locally, new version includes the replacement policy (same as the original one, with just one extra `toFQDNs` statement.)\n/test\n/test-gke\r\n\r\nGKE had failed with timeouts while retrieving the Quay images, retriggering.\nK8s 1.23 / 4.9 hit an issue that looks similar to #13552, though the full stacktrace is a bit different:\r\n\r\n
\r\nStacktrace", + "
\r\n\r\nI have never seen this one, does it ring a bell to anyone?\r\n\nCI 3.0 GKE workflow hit a Hubble flow listener timeout similar to #17907.\nTravis ARM build hit #17444, retriggering as this looks like a transient infra issue.\n@nbusseneau I looked into that failure, taking the actual error report, unfortunately ginkgo decides to ignore the formatting primitives inside the string and print it as one giant long line that's hard to read... however if we just interpret those formatting primitives (and add a bit more tasty, tasty spacing :yum: ) then we get:" + ] + } + }, + "metadata": { + "tags": [ + "cilium", + "graduated", + "networking", + "release-note-bug", + "sig-policy", + "backport-done-1-11" + ], + "category": "networking", + "cncfProjects": [ + "cilium" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cilium/cilium/pull/18166", + "sourceRepo": "cilium/cilium", + "reactions": 8, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:44:31.386Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cilium/cilium-18463-adding-support-for-aws-eni-prefix-delegation-ipv4-only.json b/solutions/cncf-generated/cilium/cilium-18463-adding-support-for-aws-eni-prefix-delegation-ipv4-only.json new file mode 100644 index 00000000..4dafbda4 --- /dev/null +++ b/solutions/cncf-generated/cilium/cilium-18463-adding-support-for-aws-eni-prefix-delegation-ipv4-only.json @@ -0,0 +1,53 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:22.558Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cilium: Adding support for AWS ENI prefix delegation - IPv4 Only", + "description": "AWS introduced support for assigning prefixes to EC2 network interfaces - [ prefix delegation (pd) ](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-prefix-eni.html). 
Some of the benefits of using PD are:\n* Increased pod density on nodes (~ 16x more pods)\n* Reduced reliance on operator for pod IP allocation.\n* Reduced API calls to AWS and faster pod startup time.\n* Reduced cost in Amazon VPC IP Address Manager https://github.com/cilium/cilium/issues/16987#issuecomment-1006225191\n\nWith `aws-enable-prefix-delegation` flag enabled operator can now allocate /28 prefixes to resolve deficit on nodes. Once allocated, operator will update the cilium node object with the corresponding 16 IPs. Agent will use these IPs just like private secondary IPs.\n\n![pd_arch](https://user-images.githubusercontent.com/3775612/149224657-0452658e-c0eb-42db-a261-8d773340780a.png)\n\nRefer to the [RFC](https://docs.google.com/document/d/1qs6q728nud6yOr2G_wFn-h_k2DVOSo8NbNpYle4ALW8/edit#) for additional detai", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Currently in CI only `cilium-cli` based connectivity tests are run for AWS ENI mode. AFAIK, There seems to be no easy way to write e2e tests exclusive to a cloud provider. This commit introduces a new ginkgo focus group for ENI, which can be used to house e2e tests for cloud provider specific features like AWS excess IP release, ENI prefix delegation, etc. There are more features that aren't currently tested e2e in CI and this focus group should make it easy to add them.\n\nThis could also be achieved with build tags maybe ? 
Please suggest if there's an easier / better way to achieve this.\n\nThis PR also adds an e2e test for changes added from https://github.com/cilium/cilium/pull/17939 and is needed to trigger the e2e test in https://github.com/cilium/cilium/pull/18463\n\nLink to [successful workflow](https://github.com/DataDog/cilium/runs/5205944242?check_suite_focus=true) run with incoming changes.\n\nTodo :\n\n- [x] Github workflow changes to trigger new tests\n- [x] Add support for managing cloud provider nodegroups from ginkgo\n- [x] Support for using `cilium-cli` from ginkgo", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "### Failure Output", + "
\n\nIf it is a flake and a GitHub issue doesn't already exist to track it, comment `/mlh new-flake Cilium-PR-K8s-1.23-kernel-net-next` so I can create one.\n/test\n\nJob 'Cilium-PR-K8s-1.21-kernel-5.4' failed:\n
Click to show.\n\n### Test Name", + "### Failure Output" + ] + } + }, + "metadata": { + "tags": [ + "cilium", + "graduated", + "networking", + "release-note-major", + "ready-to-merge", + "area-eni" + ], + "category": "networking", + "cncfProjects": [ + "cilium" + ], + "targetResourceKinds": [ + "Pod", + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cilium/cilium/pull/18463", + "sourceRepo": "cilium/cilium", + "reactions": 13, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:44:22.558Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cilium/cilium-20090-k8s-allow-setting-multiple-k8s-api-server-addresses.json b/solutions/cncf-generated/cilium/cilium-20090-k8s-allow-setting-multiple-k8s-api-server-addresses.json new file mode 100644 index 00000000..f4d0f10e --- /dev/null +++ b/solutions/cncf-generated/cilium/cilium-20090-k8s-allow-setting-multiple-k8s-api-server-addresses.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:26.778Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cilium: k8s: allow setting multiple k8s API server addresses", + "description": "See commit message for detailed description\n\nIntroduce a new command line parameter(`--k8s-api-server-urls`) and helm option(`k8s.apiServerURLs`) to specify multiple k8s API server addresses for the client to use.\n\nFixes: #19038 \n\n```release-note\nAllow setting of multiple k8s API server URLs for client\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@fristonio Welcome back! :sweat_smile: Would it be possible to set the new param from Helm? 
Currently, we use this hack to pass the API server endpoint addr - https://github.com/cilium/cilium/blob/master/install/kubernetes/cilium/templates/cilium-agent/daemonset.yaml#L193.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "@fristonio Welcome back! :sweat_smile: Would it be possible to set the new param from Helm? Currently, we use this hack to pass the API server endpoint addr - https://github.com/cilium/cilium/blob/master/install/kubernetes/cilium/templates/cilium-agent/daemonset.yaml#L193.\nHey @brb 👋 \r\nIts good to be back. 😄 \r\nYeah, I have some changes locally for the helm option that I need to test. Will update the PR soon.\n/test\nHey @qmonnet! 👋 \r\nYeah, changing the CLI flag to use `urls` instead of `addresses` makes sense to me.\r\nI have updated the PR and addressed the changes you requested. \n/test\n\nJob 'Cilium-PR-K8s-1.24-kernel-4.19' failed:\n
Click to show.\n\n### Test Name", + "### Failure Output", + "
\n\nIf it is a flake and a GitHub issue doesn't already exist to track it, comment `/mlh new-flake Cilium-PR-K8s-1.24-kernel-4.19` so I can create one.\n\nJob 'Cilium-PR-K8s-1.23-kernel-net-next' failed:\n
Click to show.\n\n### Test Name" + ] + } + }, + "metadata": { + "tags": [ + "cilium", + "graduated", + "networking", + "area-daemon", + "release-note-minor", + "stale" + ], + "category": "networking", + "cncfProjects": [ + "cilium" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cilium/cilium/pull/20090", + "sourceRepo": "cilium/cilium", + "reactions": 12, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:44:26.778Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cilium/cilium-24263-service-mesh-add-mtls-auth-method.json b/solutions/cncf-generated/cilium/cilium-24263-service-mesh-add-mtls-auth-method.json new file mode 100644 index 00000000..73e01fee --- /dev/null +++ b/solutions/cncf-generated/cilium/cilium-24263-service-mesh-add-mtls-auth-method.json @@ -0,0 +1,54 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:18.594Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cilium: Service mesh: add mTLS auth method", + "description": "This adds an mTLS auth handler to the Serice Mesh auth package.\nIt will listen on a given port and does a mutual TLS handshake with\nSPIFFE IDs it received. This will assure the both sides got the needed\ncertificates.\n\nIn order to integrate with the datapath tables it also improves the SPIFFE\ninterface to use the Cilium Numeric Identities. And convert them from and\nto valid SNI fields. 
As well as implement code to validate the URI SANS\ninside the certificates.\n\nThis can be enabled in the in the Helm chart.\n\nFixes: #23807\n\n```release-note\nAdd mtls-spiffe as auth mode in the CiliumNetworkPolicy\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This change adds a SubscribeToRotatedIdentities function.\nThis gives a channel which is used to pass identity updates\nback from the certificate proider to the auth manager.\nIn the auth manager there can better be acted upon to\nreceive the IDs and re-trigger a mTLS handshake if needed.\n\n```release-note\nAdd a mechanism for the SPIRE server to signal rotated certificates for re-authenticating connections\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "This change adds a SubscribeToRotatedIdentities function.\r\nThis gives a channel which is used to pass identity updates\r\nback from the certificate proider to the auth manager.\r\nIn the auth manager there can better be acted upon to\r\nreceive the IDs and re-trigger a mTLS handshake if needed.", + "How to test this (I know you cannot wait ;) ):\r\n\r\nEnable it via Helm:", + "Install SPIRE: https://github.com/meyskens/cilium-spiffe-poc/tree/meyskens/cilium-mtls (`make install`)\r\n\r\nDeploy a policy to use auth, I used the connectivity test pods for this" + ] + } + }, + "metadata": { + "tags": [ + "cilium", + "graduated", + "networking", + "kind-feature", + "release-note-major", + "ready-to-merge", + "area-servicemesh" + ], + "category": "networking", + "cncfProjects": [ + "cilium" + ], + "targetResourceKinds": [ + "Service", + "Networkpolicy" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cilium/cilium/pull/24263", + "sourceRepo": "cilium/cilium", + "reactions": 16, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:44:18.594Z", + "scannerVersion": "cncf-gen-1.0.0", + 
"sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cilium/cilium-26488-replace-lb-ipam-ip-allocator.json b/solutions/cncf-generated/cilium/cilium-26488-replace-lb-ipam-ip-allocator.json new file mode 100644 index 00000000..b341e20e --- /dev/null +++ b/solutions/cncf-generated/cilium/cilium-26488-replace-lb-ipam-ip-allocator.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:14.478Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cilium: Replace LB-IPAM IP allocator", + "description": "This PR introduces a new IP allocator, replaces the existing IP allocator used by LB-IPAM with this new allocator. This removes the limitation on IPv6 ranges, the full range can now be used (only limited by the resources required to book keep). It also removes the reservation of the base and broadcast IP address making it possible to allocate single IPs. Lastly, support is now added for non-CIDR IP ranges such as x.x.x.100-x.x.x.200.\n\nFixes: #24351\nFixes: #22005\nFixes: #28255\n\n```release-note\nReplace LB-IPAM IP allocator to remove limitations and enable additional features\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "> when will this pr merge?\n\nWe are currently stabilizing for the v1.14 version cut. This PR will be undrafted as soon as we can merge feature into the main branch again. 
First actual release is expected to be v1.15-snapshot.1 whenever that will be.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "cilium", + "graduated", + "networking", + "kind-enhancement", + "release-note-minor", + "ready-to-merge", + "feature-lb-ipam" + ], + "category": "networking", + "cncfProjects": [ + "cilium" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cilium/cilium/pull/26488", + "sourceRepo": "cilium/cilium", + "reactions": 24, + "comments": 27 + }, + "security": { + "scannedAt": "2026-02-27T17:44:14.478Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cilium/cilium-27464-allow-selecting-nodes-by-cidr-policy.json b/solutions/cncf-generated/cilium/cilium-27464-allow-selecting-nodes-by-cidr-policy.json new file mode 100644 index 00000000..c3181ad5 --- /dev/null +++ b/solutions/cncf-generated/cilium/cilium-27464-allow-selecting-nodes-by-cidr-policy.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:29.853Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cilium: Allow selecting nodes by CIDR policy", + "description": "This PR adds a new option, `policy-cidr-selects-nodes`, which means that cluster nodes can be selected by CIDR network policies. Normally nodes are only accessible via `remote-node` entity selectors.\n\n**How it works:**\nWe already have the notion of an \"identity scope\" -- if the top 8 bits of the 32-bit identity are 0, we assume the ID is global. If the top bits are 0x01, then the ID is a local CIDR identity. This PR declares the scope 0x02 to be for node identities.\n\nA separate identity scope is needed because the datapath needs to be able to quickly determine if a given identity corresponds to a remote node. 
Previously, all nodes had reserved identities 4 or 7. By using a separate identity scope, a fast bit comparison can be done without any additional map lookups.\n\nSo, most of this PR consists of refactoring commits to prepare for scoped local identity allocators. Then, a few commits at the end enable the behavior, switching nodes to the new identity scope and attaching `cidr:` label", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "(I didn't look in detail at the first 9 commits yet, but they seemed less controversial overall)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "cilium", + "graduated", + "networking", + "kind-feature", + "release-note-major", + "sig-policy", + "backport-author", + "affects-v1-13", + "affects-v1-14" + ], + "category": "networking", + "cncfProjects": [ + "cilium" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cilium/cilium/pull/27464", + "sourceRepo": "cilium/cilium", + "reactions": 10, + "comments": 27 + }, + "security": { + "scannedAt": "2026-02-27T17:44:29.853Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cilium/cilium-31512-do-not-snat-multicast-traffic.json b/solutions/cncf-generated/cilium/cilium-31512-do-not-snat-multicast-traffic.json new file mode 100644 index 00000000..a532c205 --- /dev/null +++ b/solutions/cncf-generated/cilium/cilium-31512-do-not-snat-multicast-traffic.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:24.571Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cilium: Do not snat multicast traffic.", + "description": "Host multicast traffic is treated as subject for SNAT and finally it breaks keepalived working on the host.\n\nFixes 
#31502", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@kvaster Can you provide a quick explanation of how this SNAT breaks keepalived. \n\nYou mention 'host multicast traffic'. This traffic is assumed sourced from the host. So in the case you are fixing here, what is the source being NAT'd to which is causing a failure?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "cilium", + "graduated", + "networking", + "dont-merge-needs-release-note-label", + "kind-community-contribution", + "dont-merge-discussion" + ], + "category": "networking", + "cncfProjects": [ + "cilium" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/cilium/cilium/pull/31512", + "sourceRepo": "cilium/cilium", + "reactions": 12, + "comments": 2 + }, + "security": { + "scannedAt": "2026-02-27T17:44:24.571Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cilium/cilium-31513-add-option-to-disable-externalip-mitigation-cve-2020-8554.json b/solutions/cncf-generated/cilium/cilium-31513-add-option-to-disable-externalip-mitigation-cve-2020-8554.json new file mode 100644 index 00000000..77f23288 --- /dev/null +++ b/solutions/cncf-generated/cilium/cilium-31513-add-option-to-disable-externalip-mitigation-cve-2020-8554.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:23.744Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cilium: Add option to disable ExternalIP mitigation (CVE-2020-8554).", + "description": "This mitigation has it's own drawbacks for some setups. 
It prevents pods communication in same cluster via ExternalIP when DSR is enabled.\n\nFixes #28187", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/test", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "cilium", + "graduated", + "networking", + "area-datapath", + "release-note-minor", + "ready-to-merge", + "kind-community-contribution", + "area-loadbalancing", + "feature-dsr", + "feature-socket-lb" + ], + "category": "networking", + "cncfProjects": [ + "cilium" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/cilium/cilium/pull/31513", + "sourceRepo": "cilium/cilium", + "reactions": 13, + "comments": 5 + }, + "security": { + "scannedAt": "2026-02-27T17:44:23.745Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cilium/cilium-32352-ipam-adds-aws-eni-ipv6-prefix-delegation-support.json b/solutions/cncf-generated/cilium/cilium-32352-ipam-adds-aws-eni-ipv6-prefix-delegation-support.json new file mode 100644 index 00000000..0f8c2b86 --- /dev/null +++ b/solutions/cncf-generated/cilium/cilium-32352-ipam-adds-aws-eni-ipv6-prefix-delegation-support.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:33.856Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cilium: IPAM: Adds AWS ENI IPv6 Prefix Delegation Support", + "description": "Previously, ENI prefix delegation support was limited to IPv4.\nThis PR updates existing structs, methods, etc. to support IPv6\nprefix delegation. 
The overall implementation follows the [existing\napproach](https://github.com/cilium/cilium/issues/16987) taken for IPv4 prefix delegation.\n\nAlthough AWS assigns a /80 prefix to an ENI, 64 addresses are the\nmaximum number of allocatable addresses. This restriction can be user\nconfigurable in the future if needed.\n\nFixes: #19251\nFixes: #18405\n\n```release-note\nAdds \"aws-enable-ipv6-prefix-delegation\" configuration flag to enable IPv6 prefix delegation support for ENI IPAM mode in AWS.\n```\n\nIncludes commit 3b20d9cd3a91fc7232ccf2812343bc028054b252 from #31145. When #31145 is merged, I will rebase to remove this commit so ignore this commit from your review.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Some example PRs from recent large-size change sets:\n\nhttps://github.com/cilium/cilium/pull/32336/commits\nhttps://github.com/cilium/cilium/pull/32125/commits\n\nIn these case I believe the reviewers also held some conversations to explain the changes to the reviewers.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Includes commit 3b20d9cd3a91fc7232ccf2812343bc028054b252 from #31145. When #31145 is merged, I will rebase to remove this commit so ignore this commit from your review.\r\n\n\n@danehans The main commit here is quite long, would it be possible to break this down into a couple smaller commits to make the changes easier to review/understand?\n> @danehans The main commit here is quite long, would it be possible to break this down into a couple smaller commits to make the changes easier to review/understand?\r\n\r\n@tommyp1ckles thanks for reviewing the PR. I originally broke the PR into ~4 commits but the `Check if build works for every commit` CI job was failing. 
Let me know if you have any suggestions on the best way to break the PR up into multiple commits that will pass this job.\nSome example PRs from recent large-size change sets:\r\n\r\nhttps://github.com/cilium/cilium/pull/32336/commits\r\nhttps://github.com/cilium/cilium/pull/32125/commits\r\n\r\nIn these case I believe the reviewers also held some conversations to explain the changes to the reviewers.\n@asauber thanks for the review and guidance. Let me look into refactoring the PR into multiple commits.\nTo verify the e2e functionality of this PR, perform the following steps:\r\n\r\nCreate an EC2 cluster:", + "__Note:__ `withOIDC: true` is required for IPv6 based on the [eksctl docs](https://eksctl.io/usage/vpc-ip-family/?h=ipv6) and I disabled `privateNetworking` to verify the kernel version of instances using SSH.\r\n\r\nPatch VPC CNI:", + "Before installing Cilium, a few dependencies must be met. First, create an IAM policy doc required for the operator to function:" + ] + } + }, + "metadata": { + "tags": [ + "cilium", + "graduated", + "networking", + "release-note-minor", + "stale" + ], + "category": "networking", + "cncfProjects": [ + "cilium" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cilium/cilium/pull/32352", + "sourceRepo": "cilium/cilium", + "reactions": 8, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:44:33.856Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cilium/cilium-32730-introduce-force-device-detection-option.json b/solutions/cncf-generated/cilium/cilium-32730-introduce-force-device-detection-option.json new file mode 100644 index 00000000..1153f289 --- /dev/null +++ b/solutions/cncf-generated/cilium/cilium-32730-introduce-force-device-detection-option.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:20.803Z", + "exportedBy": "cncf-mission-generator", 
+ "consoleVersion": "auto-generated", + "mission": { + "title": "cilium: Introduce --force-device-detection option", + "description": "- Introduce --enforce-device-detection option\n- Helm chart: enforceDeviceDetection option\n- Add tests for EnforceDeviceDetection option\n\nPlease ensure your pull request adheres to the following guidelines:\n\n- [x] For first time contributors, read [Submitting a pull request](https://docs.cilium.io/en/stable/contributing/development/contributing_guide/#submitting-a-pull-request)\n- [x] All code is covered by unit and/or runtime tests where feasible.\n- [x] All commits contain a well written commit description including a title,\n description and a `Fixes: #XXX` line if the commit addresses a particular\n GitHub issue.\n- [x] If your commit description contains a `Fixes: ` tag, then\n please add the commit author[s] as reviewer[s] to this issue.\n- [x] All commits are signed off. See the section [Developer’s Certificate of Origin](https://docs.cilium.io/en/stable/contributing/development/contributing_guide/#dev-coo)\n- [x] Provide a title or release-note blurb suitable f", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "- Introduce --enforce-device-detection option\n- Helm chart: enforceDeviceDetection option\n- Add tests for EnforceDeviceDetection option\n\nPlease ensure your pull request adheres to the following guidelines:\n\n- [x] For first time contributors, read [Submitting a pull request](https://docs.cilium.io/en/stable/contributing/development/contributing_guide/#submitting-a-pull-request)\n- [x] All code is covered by unit and/or runtime tests where feasible.\n- [x] All commits contain a well written commit description including a title,\n description and a `Fixes: #XXX` line if the commit addresses a particular\n GitHub issue.\n- [x] If your commit description contains a `Fixes: ` tag, then\n please add the commit author[s] as reviewer[s] to this issue.\n- [x] All commits are signed 
off. See the section [Developer’s Certificate of Origin](https://docs.cilium.io/en/stable/contributing/development/contributing_guide/#dev-coo)\n- [x] Provide a title or release-note blurb suitable for the release notes.\n- [x] Are you a user of Cilium? Added https://github.com/cilium/cilium/pull/32738\n- [x] Thanks for contributing!\n\nFixes: https://github.com/cilium/cilium/issues/32721\n\n```release-note\nIntroduce --enforce-device-detection option to enable the auto-detection even if specific devices are explicitly listed\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "- Introduce --enforce-device-detection option\r\n- Helm chart: enforceDeviceDetection option\r\n- Add tests for EnforceDeviceDetection option\r\n\r\nPlease ensure your pull request adheres to the following guidelines:\r\n\r\n- [x] For first time contributors, read [Submitting a pull request](https://docs.cilium.io/en/stable/contributing/development/contributing_guide/#submitting-a-pull-request)\r\n- [x] All code is covered by unit and/or runtime tests where feasible.\r\n- [x] All commits contain a well written commit description including a title,\r\n description and a `Fixes: #XXX` line if the commit addresses a particular\r\n GitHub issue.\r\n- [x] If your commit description contains a `Fixes: ` tag, then\r\n please add the commit author[s] as reviewer[s] to this issue.\r\n- [x] All commits are signed off. See the section [Developer’s Certificate of Origin](https://docs.cilium.io/en/stable/contributing/development/contributing_guide/#dev-coo)\r\n- [x] Provide a title or release-note blurb suitable for the release notes.\r\n- [x] Are you a user of Cilium? 
Added https://github.com/cilium/cilium/pull/32738\r\n- [x] Thanks for contributing!\r\n\r\n\r\n\r\nFixes: https://github.com/cilium/cilium/issues/32721", + "Commit e08b21edc628a6d7f42fafb1d54e4fc1af5ac66f does not match \"(?m)^Signed-off-by:\".\n\nPlease follow instructions provided in https://docs.cilium.io/en/stable/contributing/development/contributing_guide/#developer-s-certificate-of-origin\nCould you speak a bit about why we need three options for configuring devices? I would have expected one of two:\r\n\r\n(a) Auto-detection. Cilium tries its best to select devices, and Cilium is responsible for picking the devices.\r\n(b) Case (a) didn't work for some reason (bug?), so the user wants to override the set of devices. In this case Cilium is clearly *not* responsible for picking the devices.\r\n\r\nCase (c) Both Cilium and the user specify devices seems like it confuses who is responsible for this logic, and it's yet another complicated flag in an already complicated set of flags for the agent. Can't we just fix (a) or (b) to better cover your use case?\n@joestringer sure, I can explain. The reason is that we're not end user, we're adopting Cilium together with [kube-ovn](https://github.com/kubeovn/kube-ovn) in [Cozystack](https://github.com/aenix.io/cozystack) a free platform for running managed services. We need kube-ovn as it provides extensive network fabric for running VirtualMachines using KubeVirt.\r\n\r\nOur users might have various hardware configurations with various interfaces, eg. some of them using bonding and vlan interfaces, some of them are not, so devices must be auto-detected.\r\n\r\nBut in order to make this configuration working, the `ovn0` device should also be added to the list of auto-detected devices.\r\n\r\nThe reason I'm insist for adding an extra option and not extend autodetection rules, is that I don't want to break exiting installations. Eg. 
OpenShift project also uses OVN but different implementation [ovn-kubernetes](https://github.com/ovn-org/ovn-kubernetes). Some other users might want to run Kubernetes with Cilium on hypervisors with OVN which is unrelated to the Kubernetes networking. Eg, Mirantis uses Kubernetes to provision OpenStack platform, OVN is a default networking for OpenStack for now.\r\n\r\nAnother user-story that I see is that there might be other users who might want to do the same, eg. to create an extra VPN device on every node, that must be always specified in a list in addition to auto-detection rules.\nThe use-case and the ability to configure this makes sense to me. Though I think we need to consider how to expose this to users. I find `--enforce-device-detection` to be misleading, especially as we had earlier this notion of whether or not to detect any devices at all, so I think it'd be better not to use that wording.\r\n\r\nWhat we essentially want to accomplish here is filter based on contents of `--devices`, but also apply the normal device filtering/detection logic. My suggestion would be to flip this around and have an option e.g. \"--force-devices\" (maybe someone can think of a better name) that is set to true by default and has the meaning that \"--devices\" are chosen without the additional filtering.\r\n\r\nOr we could also have a separate option \"--device-filter\", but that's likely more confusing, both to users and in terms of implementation.\nOriginal idea was to call it `--enforce-devices-detection`, I renamed it into singular form after `--enable-runtime-device-detection` option\n@aanm I answered here https://github.com/cilium/cilium/pull/32730#issuecomment-2136661151\r\n\r\nWe provide a [platform](https://cozystack.io/), which simplifies configuration for users to run VMs and managed services in Kubernetes. Cilium and kube-ovn is used there. The same as you, we want to support device autodetection which would work in various environments. 
I'll list some of them:\r\n\r\n\r\nmultiple interfaces:", + "bonding with vlan" + ] + } + }, + "metadata": { + "tags": [ + "cilium", + "graduated", + "networking", + "release-note-minor", + "ready-to-merge", + "kind-community-contribution" + ], + "category": "networking", + "cncfProjects": [ + "cilium" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cilium/cilium/pull/32730", + "sourceRepo": "cilium/cilium", + "reactions": 14, + "comments": 25 + }, + "security": { + "scannedAt": "2026-02-27T17:44:20.803Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cilium/cilium-33434-service-differentiate-udp-and-tcp-protocols.json b/solutions/cncf-generated/cilium/cilium-33434-service-differentiate-udp-and-tcp-protocols.json new file mode 100644 index 00000000..30e7e71a --- /dev/null +++ b/solutions/cncf-generated/cilium/cilium-33434-service-differentiate-udp-and-tcp-protocols.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:28.481Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cilium: service: differentiate UDP and TCP protocols", + "description": "This PR introduces support for protocol differentiation (UDP, TCP) for services.\n\nThe high level idea (first commit) is to take into account the L4 protocol specified in a k8s service when populating the bpf lb maps, and then use the packet's protocol to lookup services in those maps.\n\nNext, a new `--bpf-lb-proto-diff` flag (enabled by default) is introduced. When disabled, the agent strips the protocol from the service object, so even though all the protocol differentiation logic is in place, the control plane sees only services with `NONE` protocol, effectively falling back to the old behavior. 
In addition to that also the datapath stops taking into account the protocol when looking up the lb maps.\n\nNext we tackle the reason why the previous attempts didn't make it into the main branch: connection disruptions during upgrades and downgrades. The proposed solution is to keep, during upgrades, existing old style/`NONE` services as such instead of deleting them and creating new protocol-", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "update the logic used to wait for local redirect bpf entries as from\nv1.16 we start supporting protocol differentiation for services, which\nmeans the frontend string description will now include also the protocol", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "cilium", + "graduated", + "networking", + "area-datapath", + "kind-feature", + "release-note-major", + "ready-to-merge", + "area-loadbalancing" + ], + "category": "networking", + "cncfProjects": [ + "cilium" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cilium/cilium/pull/33434", + "sourceRepo": "cilium/cilium", + "reactions": 11, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:44:28.481Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cilium/cilium-37601-kpr-support-kube-apiserver-ha.json b/solutions/cncf-generated/cilium/cilium-37601-kpr-support-kube-apiserver-ha.json new file mode 100644 index 00000000..60e6aceb --- /dev/null +++ b/solutions/cncf-generated/cilium/cilium-37601-kpr-support-kube-apiserver-ha.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:15.823Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cilium: kpr: Support kube-apiserver HA", + "description": 
"Support kube-apiserver high availability with kube-proxy replacement where the agent can fail over to an active kube-apiserver at runtime. \n\n### Background \nCilium agent requires a connection to the kube-apiserver control plane to program the BPF datapath without depending on kube-proxy load-balancing when kube-proxy replacement is enabled. To achieve this, Cilium uses `API_SERVER_IP` and `API_SERVER_PORT` configurations for direct connection to the kube-apiserver. However, this approach doesn't support production environments that require multiple kube-apiservers for high availability. Additionally, it cannot rely on a fixed set of addresses provided at bootstrap time due to potential Kubernetes node failures or recycling in production environments.\n\n### Commits summary\nThis PR introduces a flag for user to configure multiple kube-apiserver URLs. Additionally, it adds logic to connect to one of the active kube-apiservers in a cluster during initial connection time, and switches over t", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/test", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "/test\n/test\n/test\n/test\n/test\n/test\nci-clustermesh is failing with known breakages. Rebasing the PR to pick up the upstream fixes.\r\n\r\nOther failures - \r\n\r\nhttps://github.com/cilium/cilium/issues/36902\r\nhttps://github.com/cilium/cilium/issues/37763\n/test\n/test\n/test\n/test\n/test\n> Docs good. A paragraph under https://docs.cilium.io/en/latest/network/kubernetes/kubeproxy-free/ would be nice as a follow-up too.\r\n\r\nYes, the KPR documentation does warrant a paragraph. I'll push a commit once in-flight reviews are in.\n/test\n@lambdanis @dylandreimerink Thanks for the reviews -- addressed your comments. PTAL!\n/test\nSo, an interesting thing about how how the `default/kubernetes` service works: it's Special. 
It doesn't use a label selector like other services. Rather, each apiserver manually adds its own IP to the Endpoints when starting, and removes itself when going down.\r\n\r\nAn interesting problem is that Azure Kubernetes Service only ever has zero-or-one IP in the Endpoints. When the apiserver is being failed over, the old one first goes down, briefly causing 0 Endpoints to back the service. Then a new one is created and life goes on.\r\n\r\nFor clients connecting to the domain name this works fine. Likewise, for clients connecting to the service IP, this also works. However, does this code handle that case? It's very hard to tell.\r\n\r\nI would ask you to write some docblocks for the resolving code; I had a very hard time understanding what the functions do. Given that any potential issues are likely to be very high severity, it would be nice to have this well-documented.\n> An interesting problem is that Azure Kubernetes Service only ever has zero-or-one IP in the Endpoints. When the apiserver is being failed over, the old one first goes down, briefly causing 0 Endpoints to back the service. Then a new one is created and life goes on.\r\n> \r\n> For clients connecting to the domain name this works fine. Likewise, for clients connecting to the service IP, this also works. However, does this code handle that case? It's very hard to tell.\r\n> \r\n\r\nThe agent does switch to the apiservice address, but I don't know about the internal details of the AKS case.\r\nCan you elaborate how clients connect to the service VIP when there are 0 endpoints? \r\n\r\n> I would ask you to write some docblocks for the resolving code; I had a very hard time understanding what the functions do. Given that any potential issues are likely to be very high severity, it would be nice to have this well-documented.\r\n\r\nAgreed, there are in-line comments throughout the code that handles the HA functionality. But I'll also add a high level overview, and some more comments. 
\n> Have we considered that client-side load balancing for this feature might not be the right approach?\r\n> \r\n> Kubernetes pods (including Cilium Agent) have the `KUBERNETES_SERVICE_HOST` and `KUBERNETES_SERVICE_PORT` environment variables, which allow for server-side load balancing of the Kubernetes API server.\r\n> \r\n> This load balancing is then typically the responsibility of the Kubernetes control plane provider.\r\n> \r\n> Example:\r\n> \r\n>" + ] + } + }, + "metadata": { + "tags": [ + "cilium", + "graduated", + "networking", + "kind-feature", + "release-note-major", + "area-loadbalancing" + ], + "category": "networking", + "cncfProjects": [ + "cilium" + ], + "targetResourceKinds": [ + "Service", + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cilium/cilium/pull/37601", + "sourceRepo": "cilium/cilium", + "reactions": 24, + "comments": 25 + }, + "security": { + "scannedAt": "2026-02-27T17:44:15.823Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cilium/cilium-39304-multigateway-support-for-egress-gateway.json b/solutions/cncf-generated/cilium/cilium-39304-multigateway-support-for-egress-gateway.json new file mode 100644 index 00000000..8d5c552c --- /dev/null +++ b/solutions/cncf-generated/cilium/cilium-39304-multigateway-support-for-egress-gateway.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:35.188Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cilium: Multigateway support for Egress Gateway", + "description": "Add Multigateway support to egress gateway. 
\n\n[Link to the CFP](https://github.com/cilium/design-cfps/blob/main/cilium/CFP-38341-multigateway-egress-gateway-policy.md)\n\nFixes: #38341\n\n```release-note\nAdd support for multiple gateways to Cilium Egress Gateway.\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@bowei", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "cilium", + "graduated", + "networking", + "release-note-major", + "feature-egress-gateway", + "kind-community-contribution" + ], + "category": "networking", + "cncfProjects": [ + "cilium" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cilium/cilium/pull/39304", + "sourceRepo": "cilium/cilium", + "reactions": 8, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:44:35.188Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cloud-custodian/cloud-custodian-5595-dont-require-countunit-in-ecr-lifecycles.json b/solutions/cncf-generated/cloud-custodian/cloud-custodian-5595-dont-require-countunit-in-ecr-lifecycles.json new file mode 100644 index 00000000..1c088c71 --- /dev/null +++ b/solutions/cncf-generated/cloud-custodian/cloud-custodian-5595-dont-require-countunit-in-ecr-lifecycles.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:22.783Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cloud-custodian: Dont require countUnit in ecr lifecycles", + "description": "Resolves #5593", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Thanks for the add! Something to be aware of there is that it will look for the countUnit if countType is sinceImagePushed. 
\nhttps://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html\nBut fortunately there is already a validation in place on line 203.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "I updated my comment, its called at validation time already so it shouldnt be necessary to add complex schema validation\n> I updated my comment, its called at validation time already so it shouldnt be necessary to add complex schema validation\r\n\r\nthe issue is that python validation isn't fully correct to handle all cases (per the note I wrote inline to that function), ie. this is missing countNumber and passes validation, but blows up at runtime." + ] + } + }, + "metadata": { + "tags": [ + "cloud-custodian", + "incubating", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "cloud-custodian" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cloud-custodian/cloud-custodian/pull/5595", + "sourceRepo": "cloud-custodian/cloud-custodian", + "reactions": 2, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:46:22.783Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cloud-native-network/cloud-native-network-266-upgrade-oci-spec-rs-to-0-4-0.json b/solutions/cncf-generated/cloud-native-network/cloud-native-network-266-upgrade-oci-spec-rs-to-0-4-0.json new file mode 100644 index 00000000..40928a18 --- /dev/null +++ b/solutions/cncf-generated/cloud-native-network/cloud-native-network-266-upgrade-oci-spec-rs-to-0-4-0.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:54.637Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cloud-native-network: Upgrade oci-spec-rs to 0.4.0", + "description": "fix: #225 \n\n## Common\n- [x] Fix import 
path: `use oci_spec::XXX` -> `use oci_spec::runtime::XXX`\n\n## Upgrade oci-spec-rs in `cgroups`\n- [x] Port FreezerState to `cgroups` crate\n - [x] `Controller::apply()` receive `ContainerOpt` instead of `LinuxResources`\n- ~~Remove `A` from `LinuxDeviceType`~~\n\n## Upgrade oci-spec-rs in `youki`\n- [x] Fix capability type (Capability type change: Vec -> HashSet)\n- [x] Implement functions equivalent to `LinuxDeviceType::to_sflag` in youki.\n\n## ~Upgrade oci-spec-rs to `v0.5.1`~\n\n- Apply builder pattern\n - https://github.com/containers/oci-spec-rs/pull/69\n - `src/container/tenant_builder.rs` rewrite", + "type": "upgrade", + "status": "completed", + "resolution": { + "summary": "ref. https://github.com/containers/youki/pull/266#issuecomment-915201298", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "@guni1192 Hi not sure if merge conflicts are becoming a lot of work with all the other PR going in. If you have a good idea on when you can get the PR ready, may be we can stop merging other PR for a day or two to get this PR merged. This is an important PR and we probably want this to go in as soon as possible. Let us know how we can help you and/or ping us on Discord.\n@yihuaf Thank you very much for your kindness.\r\nI'm currently dealing with an issue where conflicts and integration tests are failing.\r\nI hope to address this issue by the end of the week, but I need a favor.\r\n\r\n- Please wait a bit before merging PRs that might conflict.\r\n- I don't know why the tests in `linux_cgroups_devices/linux_cgroups_devices.t` fail, so I need your help. \r\n Unfortunately, I don't have enough knowledge about cgroup device.\n@guni1192 What is the error? 
When I run the test with your changes it is passing.\n@Furisto Thanks for trying it out.\r\nBoth CI and local fail with the same test cases.\r\nI am using the Cgroup v2 systemd driver environment.\r\nThe result is as follows.", + "I have a feeling that this is probably an effect of this.\r\nhttps://github.com/containers/oci-spec-rs/pull/16\r\n@Furisto \r\nWhat do you think?\n@utam0k Yes, that was also my suspicion.\n@Furisto \r\nIt seemed we needed the `A`. I'm sorry, I forgot. I'll create a PR to fix it.\r\nhttps://www.kernel.org/doc/Documentation/cgroup-v1/devices.txt\r\n> A whitelist entry has 4 fields.\r\n> 'type' is a (all), c (char), or b (block). \n@guni1192 @Furisto I got the 0.4.0 oci-spec-rs locally and applied the same fix as this PR and it passed the integration test, so this is probably the cause of the problem.\r\nhttps://github.com/containers/oci-spec-rs/pull/65\n@guni1192 I'm going to ask them to release a fixed version, 0.5.1. However, it may be difficult to address this issue without upgrading the version to 0.5.1. Sorry...\n@utam0k I understand. I'll try as much as I can.\n@guni1192 The change to the builder pattern would not have been that exciting. Very helpful!\nNot sure if this is helpful, but maybe we can bump commit up instead of version, if the changes are too big to handle in one PR? Then there may be smaller bits to bite off? Just a suggestion :)\n@yihuaf \r\nThis is my mistake, but there were some bugs in oci-spec-rs, so CI won't go through unless the current oci-spec-rs is the latest version.\r\n@guni1192 \r\nHowever, if you need it, I can fork it and prepare a backported version to 0.4.0 with bug fixes. 
What do you think?\n@utam0k I thought it was a good idea too.\r\nIt looks like supporting the builder pattern for youki will be a bigger task than I thought.\r\nIn particular, I expect the changes to `tenant_builder.rs` to be significant.\r\nI would like you to backport your patch to 0.4.0.\n@guni1192 I prepared v0.4.0 with a bug fix.\r\nhttps://github.com/utam0k/oci-spec-rs/tree/v0.4.0-with-bugfix\r\n\r\nAnd I have tried using it and did integration test. I applied it and pass all tests." + ] + } + }, + "metadata": { + "tags": [ + "cloud-native-network", + "sandbox", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "cloud-native-network" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/youki-dev/youki/pull/266", + "sourceRepo": "youki-dev/youki", + "reactions": 3, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:46:54.638Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cloud-native-network/cloud-native-network-292-implemented-seccomp-and-pass-the-integration-test.json b/solutions/cncf-generated/cloud-native-network/cloud-native-network-292-implemented-seccomp-and-pass-the-integration-test.json new file mode 100644 index 00000000..3cc521bd --- /dev/null +++ b/solutions/cncf-generated/cloud-native-network/cloud-native-network-292-implemented-seccomp-and-pass-the-integration-test.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:53.281Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cloud-native-network: Implemented seccomp and pass the integration test", + "description": "Fix #25 \n\nSo this turns out to be simpler than I thought, thanks to the existance of `libseccomp`. `runc` also uses `libseccomp` which does all the heavy lifting.\n\nOne concern I have is the `seccomp-sys` repo. 
We don't have a lot of choices for a rust binding for libseccomp and this one looks reasonable. However, it is LGPL, so we may want to roll our own bindings. I used this repo just for prototype. If we are NOT OK with LGPL code into our repo, then we should discuss on what is the right thing to do here.\n\nRegardless, this is a functional prototypes now, so I would like you guys to take a look. The PR can be merged as is, given the issue mentioned above.\n\nTODO:\n- [ ] `oci-spec-rs` is missing a few fields for seccomp. For example, the default error code is hardcoded to EPERM at the moment.\n- [ ] Our own libssecomp bindings??", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@utam0k Hi.\nA couple of things are still missing:\n1. The definition of the system calls allowed for each architecture in the form of a collection of strings\n2. Integrate the filter definition when starting the container process.\nSomething like [this](https://github.com/opencontainers/runc/blob/654f3319762c896e1f0b23eef0d2cd70fdd6b9b1/libcontainer/standard_init_linux.go#L161)\n3. Add tests\n4. Try to better define the constants, files and seccomp folder if possible\n\nI would love to receive a hand from you, the topic is not too simple. 
Feel free to look on the points I have marked and others as well if you feel appropriate and / or necessary.", + "steps": [ + "The definition of the system calls allowed for each architecture in the form of a collection of strings", + "Integrate the filter definition when starting the container process.", + "Add tests", + "Try to better define the constants, files and seccomp folder if possible" + ] + } + }, + "metadata": { + "tags": [ + "cloud-native-network", + "sandbox", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "cloud-native-network" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/youki-dev/youki/pull/292", + "sourceRepo": "youki-dev/youki", + "reactions": 3, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:46:53.281Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cloud-native-network/cloud-native-network-3230-implement-linux-memory-policy.json b/solutions/cncf-generated/cloud-native-network/cloud-native-network-3230-implement-linux-memory-policy.json new file mode 100644 index 00000000..82672970 --- /dev/null +++ b/solutions/cncf-generated/cloud-native-network/cloud-native-network-3230-implement-linux-memory-policy.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:56.930Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cloud-native-network: Implement Linux memory policy", + "description": "This PR adds support for the new `linux.memoryPolicy` field introduced in the OCI Runtime Spec (opencontainers/runtime-spec#1282). 
\nWith this change, youki can configure NUMA memory policies for containers by calling `set_mempolicy(2)` during container initiali", + "type": "analyze", + "status": "completed", + "resolution": { + "summary": "#### What type of PR is this?\n\n/kind api-change\n\n#### What this PR does / why we need it:\n\nThis change was made to the OCI specification.\n\nhttps://github.com/opencontainers/runtime-spec/pull/1294/files\n\n#### Which issue(s) this PR fixes:\n\nNone\n\n#### Special notes for your reviewer:\n\nThis change is required based on the following PR:\n\nhttps://github.com/youki-dev/youki/pull/3230\n\n#### Does this PR introduce a user-facing change?\n\n```release-note\nAs per OCI Runtime Spec PR #1282, `Linux.memoryPolicy.nodes` has become optional.\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Since oci-spec-rs crate—which includes support for Linux memory policy—hasn't yet been released, I couldn't perform tests via CI. However, I have added integration tests. By cloning the branch containing [these changes](https://github.com/youki-dev/oci-spec-rs/pull/292) in oci-spec-rs, editing youroki's Cargo.toml to point to the local crate, and running tests locally, we were able to verify that the tests pass.\nThank you for the thorough code review.\r\nSorry for the delay!\r\nI've made all the requested changes. 
Please take a look.\r\n\r\nOn a machine with two NUMA nodes, I have verified that the tests pass as follows:" + ] + } + }, + "metadata": { + "tags": [ + "cloud-native-network", + "sandbox", + "networking", + "kind-feature" + ], + "category": "networking", + "cncfProjects": [ + "cloud-native-network" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/youki-dev/youki/pull/3230", + "sourceRepo": "youki-dev/youki", + "reactions": 1, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:46:56.930Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cloud-native-network/cloud-native-network-773-update-the-install-instructions-for-fedora.json b/solutions/cncf-generated/cloud-native-network/cloud-native-network-773-update-the-install-instructions-for-fedora.json new file mode 100644 index 00000000..44c94316 --- /dev/null +++ b/solutions/cncf-generated/cloud-native-network/cloud-native-network-773-update-the-install-instructions-for-fedora.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:55.476Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cloud-native-network: Update the install instructions for Fedora", + "description": "For some reason, when installing the build tools, the static glibc was\nnot installed.\n\nFixes https://github.com/containers/youki/issues/772\n\n", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "@RMPR First of all, thanks for your PR. I wonder if this is a problem only for youki. 
Can you build other rust projects?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "❯ cargo build --target x86_64-unknown-linux-musl\r\n Compiling cfg-if v1.0.0\r\n Compiling lazy_static v1.4.0\r\n Compiling regex-syntax v0.6.23\r\n Compiling scopeguard v1.1.0\r\nerror[E0463]: can't find crate for `core`\r\n |\r\n = note: the `x86_64-unknown-linux-musl` target may not be installed\r\n = help: consider downloading the target with `rustup target add x86_64-unknown-linux-musl`\r\n\r\nFor more information about this error, try `rustc --explain E0463`.\r\nerror: could not compile `scopeguard` due to previous error\r\nwarning: build failed, waiting for other jobs to finish...\r\nerror[E0463]: can't find crate for `std`\r\n |\r\n = note: the `x86_64-unknown-linux-musl` target may not be installed\r\n = help: consider downloading the target with `rustup target add x86_64-unknown-linux-musl`\r\n\r\nerror: build failed", + "There is no rustup in my distro's repository. [It seems](https://users.rust-lang.org/t/how-can-we-add-target-without-rustup/52762) the manual process of crosscompiling is a bit more involved, I can't do that right now, but as soon as I complete that I come back to you.\nI tried building youki on a fresh installation of Fedora 35 and I can build it with just rustup and the dependencies that we already have in the readme. This looks like a problem with your system.\n@Furisto you are right, using rustup it does work, but @utam0k I still think this is valuable because not everyone is comfortable not using packages from the official repositories. 
You can easily reproduce the error I highlighted on a fresh Fedora install:" + ] + } + }, + "metadata": { + "tags": [ + "cloud-native-network", + "sandbox", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "cloud-native-network" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/youki-dev/youki/pull/773", + "sourceRepo": "youki-dev/youki", + "reactions": 1, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:46:55.477Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cloudevents/cloudevents-1330-adding-verifiability-proposal.json b/solutions/cncf-generated/cloudevents/cloudevents-1330-adding-verifiability-proposal.json new file mode 100644 index 00000000..8472b41a --- /dev/null +++ b/solutions/cncf-generated/cloudevents/cloudevents-1330-adding-verifiability-proposal.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:37.429Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cloudevents: Adding verifiability proposal", + "description": "The \"CloudEvents Verifiability\" proposal introduces a transport protocol-agnostic mechanism to enhance the security of CloudEvents. It enables event producers to sign the events they emit, allowing consumers to cryptographically verify both the authenticity and integrity of the received events. This ensures that consumers can trust the source of the events and be confident that the events have not been tampered with during transit. The primary threats addressed by this proposal are impersonation of event producers and unauthori", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "A top level comment with only a suggestion. 
In the past when the group has discussed this sort of thing, we have thought that it would start as an official extension. You may want to consider moving these into the extensions folder because this will provide a lower probability of rejection and a perhaps lower bar for acceptance. We will, of course, do our best to provide sufficient constructive criticism to yield a more useful result.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "cloudevents/extensions/verifiability.md: Translation file cloudevents/languages/he/extensions/verifiability.md does not exist\r\ncloudevents/extensions/verifiability.md: Translation file cloudevents/languages/zh-CN/extensions/verifiability.md does not exist", + "ce-id: 1234\r\nce-type: com.mycustom.type\r\nce-source: /my/source", + "{\r\n \"id\": \"1234\",\r\n \"type\": \"com.mycustom.foo\",\r\n \"source\": \"/my/source\",\r\n \"specversion\": \"1.0\",\r\n \"data\": { ... 
}\r\n}" + ] + } + }, + "metadata": { + "tags": [ + "cloudevents", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "cloudevents" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cloudevents/spec/pull/1330", + "sourceRepo": "cloudevents/spec", + "reactions": 1, + "comments": 17 + }, + "security": { + "scannedAt": "2026-02-27T17:44:37.429Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cloudevents/cloudevents-148-http-transport-and-json-mapping.json b/solutions/cncf-generated/cloudevents/cloudevents-148-http-transport-and-json-mapping.json new file mode 100644 index 00000000..7714aec4 --- /dev/null +++ b/solutions/cncf-generated/cloudevents/cloudevents-148-http-transport-and-json-mapping.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:40.524Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cloudevents: HTTP transport and JSON mapping", + "description": "Refers to #93 \n\nThese two documents are proposals for the HTTP transport mapping and for the JSON event format. \n\nExpect some unprompted churn for the next couple of days while I add a further clarifications and examples and do fixups for links, etc.\n\nPlease ask each and all questions you may have here in the form of comments; all questions will help to make the spec more legible. \n\nCloses: #93\n\nSigned-off-by: clemensv ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This PR complements https://github.com/cloudevents/spec/pull/148 \n\nThis specification is a *generic* specification that formalizes delivery of notifications over HTTP, generally known as \"Web hooks\". 
The HTTP transport binding for Cloud Events composes with this specification for one-way event delivery.\n\nThe specification defines that notifications are delivered by HTTP POST, should use a token-based authNZ scheme either carried in the Authorization header or the query string (whereby \"token\" can be a simple key), and strongly suggests implementation of a pre-flight handshake (similar to [W3C CORS](www.w3.org/TR/cors/)) that protects a sender from being abused as a distributed denial of service machine gun.\n\nThis specification would probably be best proposed to IETF, but here's a good place to start with the draft.\n\nThis is not relevant to the 0.1 of the core spec, but is relevant to the interop group\n\nSigned-off-by: Clemens Vasters ", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "cloudevents", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "cloudevents" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cloudevents/spec/pull/148", + "sourceRepo": "cloudevents/spec", + "reactions": 0, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:44:40.524Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cloudevents/cloudevents-660-docs-add-possibility-of-additional-message-modes.json b/solutions/cncf-generated/cloudevents/cloudevents-660-docs-add-possibility-of-additional-message-modes.json new file mode 100644 index 00000000..c7a0444e --- /dev/null +++ b/solutions/cncf-generated/cloudevents/cloudevents-660-docs-add-possibility-of-additional-message-modes.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:38.972Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cloudevents: docs: add 
possibility of additional message modes.", + "description": "Mentions the possibility of protocols adding different message modes (such as `batched`).\nFixes #645.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Another attempt to fix https://github.com/cloudevents/spec/issues/645\n\nSee also https://github.com/cloudevents/spec/pull/660\n\nSigned-off-by: Christoph Neijenhuis ", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Signed-off-by: Grant Timmerman " + ] + } + }, + "metadata": { + "tags": [ + "cloudevents", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "cloudevents" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cloudevents/spec/pull/660", + "sourceRepo": "cloudevents/spec", + "reactions": 0, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:44:38.972Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/clusternet/clusternet-635-add-helm-options-upgradeatomic-parameter.json b/solutions/cncf-generated/clusternet/clusternet-635-add-helm-options-upgradeatomic-parameter.json new file mode 100644 index 00000000..97ed7636 --- /dev/null +++ b/solutions/cncf-generated/clusternet/clusternet-635-add-helm-options-upgradeatomic-parameter.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:59.749Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "clusternet: add helm options upgradeAtomic parameter", + "description": "#### What type of PR is this?\nkind/feature\n\n#### What this PR does / why we need it:\n\n#### Which issue(s) this PR fixes:\n\nFixes #625 \n\n#### Special notes for your reviewer:", + "type": "upgrade", + "status": "completed", + "resolution": { + "summary": 
"#### What type of PR is this?\nA followup of #635\n\n#### What this PR does / why we need it:\n\n#### Which issue(s) this PR fixes:\n\nFixes #\n\n#### Special notes for your reviewer:", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Generating CRDs at manifests/crds\r\nGenerating deepcopy funcs\r\nF0323 17:27:05.642928 41217 main.go:83] Error: Failed executing generator: some packages had errors:\r\nerrors in package \"github.com/clusternet/clusternet/pkg/apis/apps/v1alpha1\":\r\nopen ../../../github.com/clusternet/clusternet/pkg/apis/apps/v1alpha1/zz_generated.deepcopy.go: no such file or directory\r\n\r\nerrors in package \"github.com/clusternet/clusternet/pkg/apis/clusters/v1beta1\":\r\nopen ../../../github.com/clusternet/clusternet/pkg/apis/clusters/v1beta1/zz_generated.deepcopy.go: no such file or directory\r\n\r\nerrors in package \"github.com/clusternet/clusternet/pkg/apis/proxies/v1alpha1\":\r\nopen ../../../github.com/clusternet/clusternet/pkg/apis/proxies/v1alpha1/zz_generated.deepcopy.go: no such file or directory\r\n\r\nmake: *** [generated] Error 255", + "Generating OpenAPI definitions for proxies:v1alpha1 at github.com/clusternet/clusternet/pkg/generated/openapi\r\nAPI rule violation: list_type_missing,k8s.io/apimachinery/pkg/apis/meta/v1,APIGroup,ServerAddressByClientCIDRs\r\nAPI rule violation: list_type_missing,k8s.io/apimachinery/pkg/apis/meta/v1,APIGroup,Versions\r\nAPI rule violation: list_type_missing,k8s.io/apimachinery/pkg/apis/meta/v1,APIGroupList,Groups\r\nAPI rule violation: list_type_missing,k8s.io/apimachinery/pkg/apis/meta/v1,APIResource,Categories\r\nAPI rule violation: list_type_missing,k8s.io/apimachinery/pkg/apis/meta/v1,APIResource,ShortNames\r\nAPI rule violation: list_type_missing,k8s.io/apimachinery/pkg/apis/meta/v1,APIResourceList,APIResources\r\nAPI rule violation: 
list_type_missing,k8s.io/apimachinery/pkg/apis/meta/v1,APIVersions,ServerAddressByClientCIDRs\r\nAPI rule violation: list_type_missing,k8s.io/apimachinery/pkg/apis/meta/v1,APIVersions,Versions\r\nAPI rule violation: list_type_missing,k8s.io/apimachinery/pkg/apis/meta/v1,ApplyOptions,DryRun\r\nAPI rule violation: list_type_missing,k8s.io/apimachinery/pkg/apis/meta/v1,CreateOptions,DryRun\r\n...", + "go clean -modcache\r\nrm $(which controller-gen)" + ] + } + }, + "metadata": { + "tags": [ + "clusternet", + "sandbox", + "app-definition", + "kind-feature" + ], + "category": "workloads", + "cncfProjects": [ + "clusternet" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/clusternet/clusternet/pull/635", + "sourceRepo": "clusternet/clusternet", + "reactions": 1, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:46:59.749Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/clusterpedia/clusterpedia-345-add-memory-storage-for-supporting-listandwatch.json b/solutions/cncf-generated/clusterpedia/clusterpedia-345-add-memory-storage-for-supporting-listandwatch.json new file mode 100644 index 00000000..56040b87 --- /dev/null +++ b/solutions/cncf-generated/clusterpedia/clusterpedia-345-add-memory-storage-for-supporting-listandwatch.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:01.994Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "clusterpedia: Add memory storage for supporting ListAndWatch", + "description": "Co-authored-by: duanmeng \nCo-authored-by: wuyingjun \nCo-authored-by: hanweisen \nSigned-off-by: zhangyongxi \n\n**What type of PR is this?**\n\n**What this PR does / why we need it**:\n\n**Which issue(s) this PR fixes**:\nFixes #265\n\n**Special notes for your reviewer**:\n\n**Does this PR introduce a user-facing 
change?**:\n```release-note\n1. Add memory storage.\n2. Support ListAndWatch for Multi-cluster.\n```", + "type": "analyze", + "status": "completed", + "resolution": { + "summary": "/cc @Iceber \nMay be you guys can start first found review", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "clusterpedia", + "sandbox", + "orchestration", + "lgtm" + ], + "category": "troubleshooting", + "cncfProjects": [ + "clusterpedia" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/clusterpedia-io/clusterpedia/pull/345", + "sourceRepo": "clusterpedia-io/clusterpedia", + "reactions": 4, + "comments": 20 + }, + "security": { + "scannedAt": "2026-02-27T17:47:01.994Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/confidential-containers/confidential-containers-56-add-sc-expansion-protocol.json b/solutions/cncf-generated/confidential-containers/confidential-containers-56-add-sc-expansion-protocol.json new file mode 100644 index 00000000..36e449b1 --- /dev/null +++ b/solutions/cncf-generated/confidential-containers/confidential-containers-56-add-sc-expansion-protocol.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:05.045Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "confidential-containers: Add SC expansion protocol", + "description": "First draft of expansion procedure as discussed last week.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Ok, I have pushed a number of updates to this to incorporate comments, make certain things more explicit, and consider other types of membership changes besides expansion. 
PTAL.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "confidential-containers", + "sandbox", + "security" + ], + "category": "security", + "cncfProjects": [ + "confidential-containers" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/confidential-containers/confidential-containers/pull/56", + "sourceRepo": "confidential-containers/confidential-containers", + "reactions": 0, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:47:05.045Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/container-network-interface-cni-/container-network-interface-cni-1007-update-netconf-dns-s-json-tag-to-omitempty.json b/solutions/cncf-generated/container-network-interface-cni-/container-network-interface-cni-1007-update-netconf-dns-s-json-tag-to-omitempty.json new file mode 100644 index 00000000..64f97130 --- /dev/null +++ b/solutions/cncf-generated/container-network-interface-cni-/container-network-interface-cni-1007-update-netconf-dns-s-json-tag-to-omitempty.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:25.524Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "container-network-interface-cni-: update NetConf.DNS's json tag to omitempty", + "description": "Fixes https://github.com/containernetworking/cni/issues/1006", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This PR changes DNS in NetConf/Results to pointer. 
This PR contains #1007 and DNS Result change commit.\n\nPlugin repo needs change: https://github.com/containernetworking/plugins/pull/964", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "type NetConf struct {\r\n\tCNIVersion string `json:\"cniVersion,omitempty\"`\r\n\tIPAM IPAM `json:\"ipam,omitempty\"`\r\n\tDNS DNS `json:\"dns,omitempty\"`\r\n}\r\n\r\ntype IPAM struct {\r\n\tType string `json:\"type,omitempty\"`\r\n}\r\n\r\ntype Test struct {\r\n\tName string `json:\"name,omitempty\"`\r\n}\r\n\r\nfunc main(){\r\n \tt := &test{\r\n\t CNIVersion: \"test\",\r\n\t}\r\n\r\n\tbytes,err := json.Marshal(t)\r\n\tif err != nil {\r\n\t\tlog.Fatalln(err)\r\n\t}\r\n\r\n\tlog.Println(string(bytes))\r\n}\r\n\r\n#cyclinder~ go run main.go\r\n2023/07/11 11:19:00 {\"ipam\":{},\"dns\":{},\"cniVersion\":\"test\"}", + "type NetConf struct {\r\n\tCNIVersion string `json:\"cniVersion,omitempty\"`\r\n\tIPAM *IPAM `json:\"ipam,omitempty\"`\r\n\tDNS *DNS `json:\"dns,omitempty\"`\r\n}\r\n\r\ntype IPAM struct {\r\n\tType string `json:\"type,omitempty\"`\r\n}\r\n\r\ntype Test struct {\r\n\tName string `json:\"name,omitempty\"`\r\n}\r\n\r\nfunc main(){\r\n \tt := &test{\r\n\t CNIVersion: \"test\",\r\n\t}\r\n\r\n\tbytes,err := json.Marshal(t)\r\n\tif err != nil {\r\n\t\tlog.Fatalln(err)\r\n\t}\r\n\r\n\tlog.Println(string(bytes))\r\n}\r\n\r\n#cyclinder~ go run main.go\r\n2023/07/11 11:21:00 {\"cniVersion\":\"test\"}" + ] + } + }, + "metadata": { + "tags": [ + "container-network-interface-cni-", + "incubating", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "container-network-interface-cni-" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/containernetworking/cni/pull/1007", + "sourceRepo": "containernetworking/cni", + "reactions": 1, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:46:25.524Z", + "scannerVersion": 
"cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/container-network-interface-cni-/container-network-interface-cni-290-fixed-the-check-for-network-namespace-path.json b/solutions/cncf-generated/container-network-interface-cni-/container-network-interface-cni-290-fixed-the-check-for-network-namespace-path.json new file mode 100644 index 00000000..4812383a --- /dev/null +++ b/solutions/cncf-generated/container-network-interface-cni-/container-network-interface-cni-290-fixed-the-check-for-network-namespace-path.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:26.819Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "container-network-interface-cni-: Fixed the check for network namespace path.", + "description": "The expectation on older kernels (< 3.19) was to have the network\nnamespace always be a directory. This is not true if the network\nnamespace is bind mounted to a file, and will make the plugin fail\nerroneously in such cases.\n\nI have tried this with Mesos on a CentOS 7 cluster, running 3.10 kernel and it works fine. Moreover, have also tried it on Ubuntu 14.04 which has 3.13 kernel and works fine on that as well. 
\n\nThis should fix #288", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Fixed the namespace path check in `pkg/ns` package in the upstream `containernetworking/cni` repo:\nhttps://github.com/containernetworking/cni/pull/290\n\nUpdating the CNI plugins package to reflect this PR.\n\nFixes https://dcosjira.atlassian.net/browse/DCOS-300", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "container-network-interface-cni-", + "incubating", + "networking", + "low-hanging-fruit", + "needs-review", + "reviewed-lgtm" + ], + "category": "networking", + "cncfProjects": [ + "container-network-interface-cni-" + ], + "targetResourceKinds": [ + "Namespace" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/containernetworking/cni/pull/290", + "sourceRepo": "containernetworking/cni", + "reactions": 0, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:46:26.819Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/container-network-interface-cni-/container-network-interface-cni-695-wip-set-process-group-id-when-fork-exec-so-c.json b/solutions/cncf-generated/container-network-interface-cni-/container-network-interface-cni-695-wip-set-process-group-id-when-fork-exec-so-c.json new file mode 100644 index 00000000..46d47dc9 --- /dev/null +++ b/solutions/cncf-generated/container-network-interface-cni-/container-network-interface-cni-695-wip-set-process-group-id-when-fork-exec-so-c.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:28.591Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "container-network-interface-cni-: [WIP] set process group ID when fork/exec so child processes terminate together", + "description": "Provide a experimental way to 
fix #687 \n\nWe let sub process called by invoke.Exec has the process group id same as its parent process id, so when the parent process is killed, all the sub processes will be killed in tree recursively.\n\nFox example, if we use libcni in CRI to call plugin **A**, and plugin **A** call plugin **B** and **C**, and plugin **B** call another plugin **D**, then the pid/pgid maybe like,\n\nprocess|pid|pgid\n-|-|-\n|CRI|100|120|\n|A|103|100|\n|B|105|103|\n|C|106|103|\n|D|108|105|\n\nIf CRI (pid==100) was killed, processes(A pid==103) with group id == 100 will be killed, and processes(B pid==105 C pid==106) with group id == 103 will be killed, and process D will be killed in the same way recursively.\nFor the same reason , if CRI try to kill process A, B&C&D will be killed recursively.\n\nSigned-off-by: Bruce Ma ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "I think test has been broken by #535 , maybe we need **sudo** now, do you have some suggestions? 
@dcbw", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "container-network-interface-cni-", + "incubating", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "container-network-interface-cni-" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/containernetworking/cni/pull/695", + "sourceRepo": "containernetworking/cni", + "reactions": 0, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:46:28.591Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/container-network-interface-cni-/container-network-interface-cni-749-spec-add-extras-for-adding-additional-info-i.json b/solutions/cncf-generated/container-network-interface-cni-/container-network-interface-cni-749-spec-add-extras-for-adding-additional-info-i.json new file mode 100644 index 00000000..f1ea08cf --- /dev/null +++ b/solutions/cncf-generated/container-network-interface-cni-/container-network-interface-cni-749-spec-add-extras-for-adding-additional-info-i.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:27.730Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "container-network-interface-cni-: spec: add `extras` for adding additional info in result.", + "description": "This PR is going to fix #582 .\n\nIt was above 4 months since [we proposed a solution](https://github.com/containernetworking/cni/issues/582#issuecomment-540047157) for adding additional information inside result struct, therefore, I'd like to give a try, propose this PR for pushing this feature online.\n\nFeel free to reject this PR if we have another idea about how to fix this issue.\nThanks.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This will be 
helpful in CNI DEL for cleaning something like iptable rules/chains ..., but I'm worrying about whether this is a break change in SPEC, which means libcni and plugins both will need a version upgrade.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "r, err := ipam.ExecAdd(plugin, stdin)" + ] + } + }, + "metadata": { + "tags": [ + "container-network-interface-cni-", + "incubating", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "container-network-interface-cni-" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/containernetworking/cni/pull/749", + "sourceRepo": "containernetworking/cni", + "reactions": 0, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:46:27.730Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/containerd/containerd-10177-multipart-layer-fetch.json b/solutions/cncf-generated/containerd/containerd-10177-multipart-layer-fetch.json new file mode 100644 index 00000000..e6745db9 --- /dev/null +++ b/solutions/cncf-generated/containerd/containerd-10177-multipart-layer-fetch.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:44.639Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "containerd: Multipart layer fetch", + "description": "TLDR: this makes pulls of big images ~2x faster (edit: and a bit more in the latest iteration), and closes #9922. 
\n\ncc: #8160, #4989\n___ \n\nHello Containerd People, I have this draft PR I would like to get your eyes on.\n\nIt basically makes pulls faster, but also tries to have not such a big memory impact, by getting consecutive chunks of the layers and immediately pushing them in the pipe (that writes to a file + that signature checksum thing).\nI noticed it made pulls ~2x faster, when using the correct settings.\n\nThe settings have a big impact, and so I did a bunch of perf tests with different settings, here are some results on a ~8GB image using a `r6id.4xlarge` instance, pulling it from s3.\nGains are somewhat similar on a ~27GB and a ~100GB image (with a little tiny bit of slowdown)\nI also tried on an nvme, and a ebs drives, they are ofc slower but gains are still the same.\n\n___\n\nMetrics on a `r6id.4xlarge` timing `crictl pull` of a 8.6GB image.\n\nThe first one with 13 tries is with 0 ", + "type": "analyze", + "status": "completed", + "resolution": { + "summary": "/ok-to-test", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "dst agv_time count(*)\r\n----- ---------------- --------\r\ntmpfs 44.0761538461539 13 \r\n\r\ndst c_para chunk_size_b ctd_max_con agv_time count(*)\r\n----- ------ ------------ ----------- -------- --------\r\ntmpfs 110 32 3 22.625 4 \r\ntmpfs 100 32 3 22.64 5 \r\ntmpfs 130 32 2 22.76 1 \r\ntmpfs 120 32 4 22.824 5 \r\ntmpfs 110 32 2 22.85 1 \r\ntmpfs 80 32 4 22.99 1 \r\ntmpfs 110 32 4 23.018 5 \r\ntmpfs 90 64 4 23.09 1 \r\ntmpfs 90 32 3 23.18 1 \r\ntmpfs 110 64 3 23.2125 4 \r\ntmpfs 80 64 3 23.29 1 \r\ntmpfs 90 64 3 23.32 1 \r\ntmpfs 100 32 4 23.352 5 \r\ntmpfs 70 15 4 23.4 1 \r\ntmpfs 100 64 3 23.65 5 \r\ntmpfs 120 15 3 23.68 1 \r\ntmpfs 110 64 2 23.74 1 \r\ntmpfs 100 64 4 23.77 5 \r\ntmpfs 70 32 4 23.81 5 \r\ntmpfs 120 32 3 23.83 5\r\n[...]", + "dst agv_time count(*)\r\n---------- ---------------- --------\r\nadded-nvme 47.4008333333333 12 \r\n\r\ndst 
c_para chunk_size_mb ctd_max_con agv_time count(*)\r\n---------- ------ ------------ ----------- -------- --------\r\nadded-nvme 130 32 3 25.24 1 \r\nadded-nvme 70 32 4 26.1 1 \r\nadded-nvme 80 32 3 26.31 1 \r\nadded-nvme 100 32 3 26.38 1 \r\nadded-nvme 120 32 4 26.58 1 \r\nadded-nvme 130 32 2 26.71 1 \r\nadded-nvme 80 32 4 26.73 1 \r\nadded-nvme 120 10 3 26.82 1 \r\nadded-nvme 80 64 3 26.93 1", + "Total Image size: 8.6 GB -rw-r--r-- 1000:1000 205 B │ │ │ └── README\r\nPotential wasted space: 34 MB drwxr-xr-x 1000:1000 319 B │ │ ├── Xresources\r\nImage efficiency score: 99 %" + ] + } + }, + "metadata": { + "tags": [ + "containerd", + "graduated", + "runtime", + "impact-changelog", + "ok-to-test", + "kind-performance", + "size-xl", + "area-distribution" + ], + "category": "runtime", + "cncfProjects": [ + "containerd" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/containerd/containerd/pull/10177", + "sourceRepo": "containerd/containerd", + "reactions": 25, + "comments": 40 + }, + "security": { + "scannedAt": "2026-02-27T17:44:44.639Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/containerd/containerd-10274-export-removevolatileoption-for-cri-image-volumes.json b/solutions/cncf-generated/containerd/containerd-10274-export-removevolatileoption-for-cri-image-volumes.json new file mode 100644 index 00000000..71caf587 --- /dev/null +++ b/solutions/cncf-generated/containerd/containerd-10274-export-removevolatileoption-for-cri-image-volumes.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:03.872Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "containerd: *: export RemoveVolatileOption for CRI image volumes", + "description": "Remove volatile option when CRI prepares image volumes.\n\nFixes: #10228", + "type": "troubleshoot", + "status": 
"completed", + "resolution": { + "summary": "```\n=== RUN TestContainerdImage\n containerd_image_test.go:44: make sure the test image doesn't exist in the cri plugin\n containerd_image_test.go:51: pull the image into containerd\n containerd_image_test.go:63: the image should be seen by the cri plugin\n containerd_image_test.go:120: the image should be marked as managed\n containerd_image_test.go:125: the image id should be created and managed\n containerd_image_test.go:130: the image should be labeled\n containerd_image_test.go:136: the image should be pinned\n containerd_image_test.go:142: should be able to start container with the image\nE0528 15:19:22.528757 70056 remote_runtime.go:243] CreateContainer in sandbox \"fa1e16854f1785618f13fb02da469f10a31537ab6e86bdf34163f32d7d5d0b7a\" from runtime service failed: rpc error: code = NotFound desc = failed to create containerd container: error unpacking image: apply layer error for \"ghcr.io/containerd/busybox:1.36\": failed to extract layer sha256:869e6058ea58994b7c023f0f0a80f6dbb672ffc1cc61ef6c272f8dd573a76cc9: failed to get reader from content store: content digest sha256:f78e6840ded1aafb6c9f265f52c2fc7c0a990813ccf96702df84a7dcdbe48bea: not found\n containerd_image_test.go:151: \n \tError Trace:\t/home/runner/actions-runner/_work/containerd/containerd/integration/containerd_image_test.go:151\n \tError: \tReceived unexpected error:\n \t \trpc error: code = NotFound desc = failed to create containerd container: error unpacking im", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "=== RUN TestContainerdImage\r\n containerd_image_test.go:44: make sure the test image doesn't exist in the cri plugin\r\n containerd_image_test.go:51: pull the image into containerd\r\n containerd_image_test.go:63: the image should be seen by the cri plugin\r\n containerd_image_test.go:120: the image should be marked as managed\r\n 
containerd_image_test.go:125: the image id should be created and managed\r\n containerd_image_test.go:130: the image should be labeled\r\n containerd_image_test.go:136: the image should be pinned\r\n containerd_image_test.go:142: should be able to start container with the image\r\nE0528 15:19:22.528757 70056 remote_runtime.go:243] CreateContainer in sandbox \"fa1e16854f1785618f13fb02da469f10a31537ab6e86bdf34163f32d7d5d0b7a\" from runtime service failed: rpc error: code = NotFound desc = failed to create containerd container: error unpacking image: apply layer error for \"ghcr.io/containerd/busybox:1.36\": failed to extract layer sha256:869e6058ea58994b7c023f0f0a80f6dbb672ffc1cc61ef6c272f8dd573a76cc9: failed to get reader from content store: content digest sha256:f78e6840ded1aafb6c9f265f52c2fc7c0a990813ccf96702df84a7dcdbe48bea: not found\r\n containerd_image_test.go:151: \r\n \tError Trace:\t/home/runner/actions-runner/_work/containerd/containerd/integration/containerd_image_test.go:151\r\n \tError: \tReceived unexpected error:\r\n \t \trpc error: code = NotFound desc = failed to create containerd container: error unpacking image: apply layer error for \"ghcr.io/containerd/busybox:1.36\": failed to extract layer sha256:869e6058ea58994b7c023f0f0a80f6dbb672ffc1cc61ef6c272f8dd573a76cc9: failed to get reader from content store: content digest sha256:f78e6840ded1aafb6c9f265f52c2fc7c0a990813ccf96702df84a7dcdbe48bea: not found\r\n \tTest: \tTestContainerdImage\r\n containerd_image_test.go:96: image should still be seen by id if only tag get deleted\r\n containerd_image_test.go:107: image should be removed from the cri plugin if all references get deleted" + ] + } + }, + "metadata": { + "tags": [ + "containerd", + "graduated", + "runtime", + "cherry-picked-1-6-x", + "cherry-picked-1-7-x", + "size-l" + ], + "category": "runtime", + "cncfProjects": [ + "containerd" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": 
"https://github.com/containerd/containerd/pull/10274", + "sourceRepo": "containerd/containerd", + "reactions": 3, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:45:03.872Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/containerd/containerd-10579-add-oci-image-volume-source-support.json b/solutions/cncf-generated/containerd/containerd-10579-add-oci-image-volume-source-support.json new file mode 100644 index 00000000..20e6d988 --- /dev/null +++ b/solutions/cncf-generated/containerd/containerd-10579-add-oci-image-volume-source-support.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:43.218Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "containerd: Add OCI/Image Volume Source support", + "description": "Fixed #10496", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/ok-to-test", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "default: + rm -rf /var/lib/containerd-test /run/containerd /run/containerd-test /tmp/containerd-config-cri.toml /tmp/test-integration /tmp/failpoint-cni-net.d /tmp/nri\r\ndefault: rm: cannot remove '/var/lib/containerd-test/io.containerd.grpc.v1.cri/image-volume/ee6521f290b2168b6e0935a181d4cff9be1ac3f505666ef0e3c98fae8199917a': Device or resource busy", + "> default: + rm -rf /var/lib/containerd-test /run/containerd /run/containerd-test /tmp/containerd-config-cri.toml /tmp/test-integration /tmp/failpoint-cni-net.d /tmp/nri\r\n> default: rm: cannot remove '/var/lib/containerd-test/io.containerd.grpc.v1.cri/image-volume/ee6521f290b2168b6e0935a181d4cff9be1ac3f505666ef0e3c98fae8199917a': Device or resource busy\r\n>", + "[plugins.'io.containerd.cri.v1.runtime'.containerd.runtimes]\r\n 
[plugins.'io.containerd.cri.v1.runtime'.containerd.runtimes.runc]\r\n runtime_type = 'io.containerd.runc.v2'\r\n snapshotter = ''" + ] + } + }, + "metadata": { + "tags": [ + "containerd", + "graduated", + "runtime", + "impact-changelog", + "kind-feature", + "area-cri", + "ok-to-test", + "size-l" + ], + "category": "runtime", + "cncfProjects": [ + "containerd" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/containerd/containerd/pull/10579", + "sourceRepo": "containerd/containerd", + "reactions": 28, + "comments": 45 + }, + "security": { + "scannedAt": "2026-02-27T17:44:43.218Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/containerd/containerd-3085-shim-pluggable-logging.json b/solutions/cncf-generated/containerd/containerd-3085-shim-pluggable-logging.json new file mode 100644 index 00000000..e2ff1aec --- /dev/null +++ b/solutions/cncf-generated/containerd/containerd-3085-shim-pluggable-logging.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:49.340Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "containerd: Shim pluggable logging", + "description": "Closes #603\n\nThis adds logging facilities at the shim level to provide minimal I/O\noverhead and pluggable logging options. 
Log handling is done within the\nshim so that all I/O, cpu, and memory can be charged to the container.\n\nA sample logging driver setting up logging for a container the systemd\njournal looks like this:\n\n```go\npackage main\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\n\t\"github.com/containerd/containerd/runtime/v2/logging\"\n\t\"github.com/coreos/go-systemd/journal\"\n)\n\nfunc main() {\n\tlogging.Run(log)\n}\n\nfunc log(ctx context.Context, config *logging.Config, ready func() error) error {\n\t// construct any log metadata for the container\n\tvars := map[string]string{\n\t\t\"SYSLOG_IDENTIFIER\": fmt.Sprintf(\"%s:%s\", config.Namespace, config.ID),\n\t}\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\t// forward both stdout and stderr to the journal\n\tgo copy(&wg, config.Stdout, journal.PriInfo, vars)\n\tgo copy(&wg, config.Stderr, journal.PriErr, vars)\n\n\t// signal that we are ready and setup for the ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Backport of for the release/1.2 branch\n\nfirst commit didn't apply clean due to https://github.com/containerd/containerd/pull/3085 not being in the 1.2 branch", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "A `logging` package has been created to assist log developers create\r\nlogging plugins for containerd.\r\n\r\nThis uses a URI based approach for logging drivers that can be expanded\r\nin the future.\r\n\r\nSupported URI scheme's are:\r\n\r\n* binary\r\n* fifo\r\n* file\r\n\r\nYou can pass the log url via ctr on the command line:", + "The following client side Opts are added:" + ] + } + }, + "metadata": { + "tags": [ + "containerd", + "graduated", + "runtime" + ], + "category": "runtime", + "cncfProjects": [ + "containerd" + ], + "targetResourceKinds": [ + "Namespace" + ], + "difficulty": "beginner", + "sourceIssue": 
"https://github.com/containerd/containerd/pull/3085", + "sourceRepo": "containerd/containerd", + "reactions": 6, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:44:49.340Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/containerd/containerd-4204-make-killing-shims-more-resilient.json b/solutions/cncf-generated/containerd/containerd-4204-make-killing-shims-more-resilient.json new file mode 100644 index 00000000..36c5c48a --- /dev/null +++ b/solutions/cncf-generated/containerd/containerd-4204-make-killing-shims-more-resilient.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:00.569Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "containerd: Make killing shims more resilient", + "description": "Currently, we send a single SIGKILL to the shim process\nonce and then we spin in a loop where we use kill(pid, 0)\nto detect when the pid has disappeared completely.\n\nUnfortunately, this has a race condition since pids can be reused causing us \nto spin in an infinite loop when that happens.\n\nThis adds a timeout to this loop which logs a warning and exits the infinite loop.\n\nThis fixes https://github.com/containerd/cri/issues/1427", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "let's try this again", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "$ pidof containerd\r\n1567\r\n$ strace -fp 1567 -e trace=kill\r\nstrace: Process 1567 attached with 301 threads\r\n[pid 13252] kill(13737, SIG_0) = 0\r\n[pid 11864] kill(13737, SIG_0) = 0\r\n[pid 6042] kill(13737, SIG_0) = 0\r\n[pid 11864] kill(13737, SIG_0) = 0\r\n[pid 6042] kill(13737, SIG_0) = 0\r\n[pid 7318] kill(13737, SIG_0) = 0\r\n[pid 8004] kill(13737, SIG_0) = 0\r\n[pid 11864] kill(13737, SIG_0) = 0\r\n[pid 
8004] kill(13737, SIG_0) = 0\r\n[pid 20957] kill(13737, SIG_0) = 0\r\n[pid 11864] kill(13737, SIG_0) = 0\r\n[pid 11864] kill(13737, SIG_0) = 0\r\n[pid 6042] kill(13737, SIG_0) = 0\r\n[pid 6042] kill(13737, SIG_0) = 0\r\n...", + "$ ps aux | grep 13737\r\nroot 13737 0.0 0.0 110356 6576 ? Sl 07:18 0:00 containerd-shim -namespace k8s.io -workdir /var/lib/container-runtime/containerd/io.containerd.runtime.v1.linux/k8s.io/051b2f90455d240b29f3130682e851c0e7fd6bc0e64121b5e77bcfb516c49b18 -address /run/containerd/containerd.sock -containerd-binary /usr/local/bin/containerd", + "| Flag | Coverage Δ | |\n|---|---|---|\n| #windows | `38.34% <ø> (ø)` | |\n\n\n------\n\n[Continue to review full report at Codecov](https://codecov.io/gh/containerd/containerd/pull/4204?src=pr&el=continue).\n> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)\n> `Δ = absolute (impact)`, `ø = not affected`, `? = missing data`\n> Powered by [Codecov](https://codecov.io/gh/containerd/containerd/pull/4204?src=pr&el=footer). Last update [e094d36...c3d0845](https://codecov.io/gh/containerd/containerd/pull/4204?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).\n\nIs this problem also relevant to the v2 shim?\nI spent some time trying to look into that late last week; my understanding is that the v2/ code doesn't have this same flow of waiting for a complete exit.\r\n\r\nMy problem with the PR is that it effectively is changing the semantics of \"waitForExit\" to \"keepTryingToKillForAwhile\" :) I'm not sure if @crosbymichael has had a chance to look at this as he is more intimately familiar with this code.\n@estesp to provide some more info here. We ran the killsnoop bpf tool (https://github.com/iovisor/bcc/blob/master/tools/killsnoop.py) to inspect and track all the `kill` signals being sent by containerd and the responses they were getting from the kenel.\r\n\r\nHere is the output from an occurrence of this issue." 
+ ] + } + }, + "metadata": { + "tags": [ + "containerd", + "graduated", + "runtime" + ], + "category": "runtime", + "cncfProjects": [ + "containerd" + ], + "targetResourceKinds": [], + "difficulty": "expert", + "sourceIssue": "https://github.com/containerd/containerd/pull/4204", + "sourceRepo": "containerd/containerd", + "reactions": 3, + "comments": 43 + }, + "security": { + "scannedAt": "2026-02-27T17:45:00.569Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/containerd/containerd-4575-set-resolver-to-pull-the-image-from-next-registry.json b/solutions/cncf-generated/containerd/containerd-4575-set-resolver-to-pull-the-image-from-next-registry.json new file mode 100644 index 00000000..14c639ce --- /dev/null +++ b/solutions/cncf-generated/containerd/containerd-4575-set-resolver-to-pull-the-image-from-next-registry.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:53.558Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "containerd: Set resolver to pull the image from next registry", + "description": "Fixes #4531, and likely https://github.com/containerd/cri/issues/1419#issuecomment-616995422 is related as well.\n\nSeems like retry currently works only for 404. 
Not sure if this is the best way to fix it but I tried to follow previous discussions (#3850 & #3868) and understood that retry always on error would be the way to go.\n\nLet me know what you think.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This PR is based on https://github.com/containerd/containerd/pull/4575#issuecomment-760631425.\n\nFixes #4531\nCloses #4575", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "if resp.StatusCode > 299 {\r\n\t\t\t\t// in case of error try next host\r\n\t\t\t\tif resp.StatusCode > 399 {\r\n\t\t\t\t\tif resp.StatusCode != http.StatusNotFound {\r\n\t\t\t\t\t\tlastErr = errors.Errorf(\"unexpected status code %v: %v\", u, resp.Status)\r\n\t\t\t\t\t}\r\n\t\t\t\t\tcontinue\r\n\t\t\t\t}\r\n\t\t\t\treturn \"\", ocispec.Descriptor{}, errors.Errorf(\"unexpected status code %v: %v\", u, resp.Status)\r\n\t\t\t}", + "> \t\t\tif resp.StatusCode > 299 {\r\n> \t\t\t\t// in case of error try next host\r\n> \t\t\t\tif resp.StatusCode > 399 {\r\n> \t\t\t\t\tif resp.StatusCode != http.StatusNotFound {\r\n> \t\t\t\t\t\tlastErr = errors.Errorf(\"unexpected status code %v: %v\", u, resp.Status)\r\n> \t\t\t\t\t}\r\n> \t\t\t\t\tcontinue\r\n> \t\t\t\t}\r\n> \t\t\t\treturn \"\", ocispec.Descriptor{}, errors.Errorf(\"unexpected status code %v: %v\", u, resp.Status)\r\n> \t\t\t}\r\n>" + ] + } + }, + "metadata": { + "tags": [ + "containerd", + "graduated", + "runtime", + "ok-to-test" + ], + "category": "runtime", + "cncfProjects": [ + "containerd" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/containerd/containerd/pull/4575", + "sourceRepo": "containerd/containerd", + "reactions": 4, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:44:53.558Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git 
a/solutions/cncf-generated/containerd/containerd-5828-shimv2-handle-sigint-sigterm.json b/solutions/cncf-generated/containerd/containerd-5828-shimv2-handle-sigint-sigterm.json new file mode 100644 index 00000000..178b180a --- /dev/null +++ b/solutions/cncf-generated/containerd/containerd-5828-shimv2-handle-sigint-sigterm.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:57.811Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "containerd: shimv2: handle sigint/sigterm", + "description": "This causes sigint/sigterm to trigger a shutdown of the shim.\nIt is needed because otherwise the v2 shim hangs system shutdown.\n\nFixes #5502", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@qiutongs \n\n> How did you verify locally?\n\nSent SIGTERM to the shim.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "containerd", + "graduated", + "runtime", + "ok-to-test" + ], + "category": "runtime", + "cncfProjects": [ + "containerd" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/containerd/containerd/pull/5828", + "sourceRepo": "containerd/containerd", + "reactions": 4, + "comments": 21 + }, + "security": { + "scannedAt": "2026-02-27T17:44:57.812Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/containerd/containerd-5859-support-quota-set-on-overlay-snapshot.json b/solutions/cncf-generated/containerd/containerd-5859-support-quota-set-on-overlay-snapshot.json new file mode 100644 index 00000000..50fc8508 --- /dev/null +++ b/solutions/cncf-generated/containerd/containerd-5859-support-quota-set-on-overlay-snapshot.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:47.645Z", + "exportedBy": 
"cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "containerd: support quota set on overlay snapshot", + "description": "support set quota on overlay snapshot\n\nas discussed in issue [#3329](https://github.com/containerd/containerd/issues/3329)\n\nFixes https://github.com/containerd/containerd/issues/3329\n\nenable quota set by follows:\n\n1. Make sure the root directory of containerd is mounted with 'pquota'\n2. Set 'overlay' as the default snapshot and enable quota in 'overlay' config\n3. Set the default quota size in CRI\n\nconfig.toml like this\n```\n [plugins.'io.containerd.cri.v1.runtime']\n enable_selinux = false\n default_snapshot_quota_size = '2M' #+\n...\n [plugins.'io.containerd.snapshotter.v1.overlayfs']\n root_path = ''\n upperdir_label = false\n sync_remove = false\n slow_chown = false\n mount_options = []\n enable_quota = true #+\n```\n\ncheck containerd root mount info\n```\n# /etc/fstab\n...\n/dev/mapper/containerd_root /var/lib/containerd xfs defaults,pquota 0 0\n\n# mount ...\n/dev/mapper/containerd_root on /var/lib/containerd type xfs (rw,relatime,prjquota)\n```\n\ncheck contai", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Quota handling is a topic we should discuss at a high level first and possibly a good topic for a future community meeting. However, getting library support for setting project quotas across different backing FS makes sense to do now.\n\nAs an end feature, we need to discuss how it will be integrated with Kubernetes and how we can use it from our own tooling and go libraries. It seems backwards to have the cri layer here enable the quota then the snapshot set a static quota. I would think the quota would be set by the client adding a quota and the snapshotter enabling it via configuration and erroring out if the backing FS couldn't support it when enabled. 
The difficult part is figuring out from the client perspective whether quota is enabled so it knows whether it can avoid expensive ephemeral storage accounting (such as in the Kubelet).", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "[plugins.'io.containerd.cri.v1.runtime']\r\n enable_selinux = false\r\n default_snapshot_quota_size = '2M' #+\r\n...\r\n [plugins.'io.containerd.snapshotter.v1.overlayfs']\r\n root_path = ''\r\n upperdir_label = false\r\n sync_remove = false\r\n slow_chown = false\r\n mount_options = []\r\n enable_quota = true #+", + "# /etc/fstab\r\n...\r\n/dev/mapper/containerd_root /var/lib/containerd xfs defaults,pquota 0 0\r\n\r\n# mount ...\r\n/dev/mapper/containerd_root on /var/lib/containerd type xfs (rw,relatime,prjquota)", + "xfs_quota -x -c \"report -h\" /var/lib/containerd/" + ] + } + }, + "metadata": { + "tags": [ + "containerd", + "graduated", + "runtime", + "status-needs-discussion", + "size-xl" + ], + "category": "runtime", + "cncfProjects": [ + "containerd" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/containerd/containerd/pull/5859", + "sourceRepo": "containerd/containerd", + "reactions": 7, + "comments": 36 + }, + "security": { + "scannedAt": "2026-02-27T17:44:47.645Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/containerd/containerd-6150-feature-support-image-pull-progress-timeout.json b/solutions/cncf-generated/containerd/containerd-6150-feature-support-image-pull-progress-timeout.json new file mode 100644 index 00000000..9faa3c58 --- /dev/null +++ b/solutions/cncf-generated/containerd/containerd-6150-feature-support-image-pull-progress-timeout.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:58.886Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": 
"auto-generated", + "mission": { + "title": "containerd: feature: support image pull progress timeout", + "description": "Kubelet sends the PullImage request without timeout, because the image size\n is unknown and timeout is hard to defined. The pulling request might run\n into 0B/s speed, if containerd can't receive any packet in that connection.\n For this case, the containerd should cancel the PullImage request.\n \n Although containerd provides ingester manager to track the progress of pulling\n request, for example `ctr image pull` shows the console progress bar, it needs\n more CPU resources to open/read the ingested files to get status.\n \n In order to support progress timeout feature with lower overhead, this\n patch uses http.RoundTripper wrapper to track active progress. That\n wrapper will increase active-request number and return the\n countingReadCloser wrapper for http.Response.Body. Each bytes-read\n can be count and the active-request number will be descreased when the\n countingReadCloser wrapper has been closed. 
For the progress tracker,\n it can check the", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/test all", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "=== Failed\r\n=== FAIL: gc/scheduler TestTrigger (0.01s)\r\n scheduler_test.go:135: GC wait timed out", + "container_stats_test.go:364: \r\n \tError Trace:\tcontainer_stats_test.go:364\r\n \tError: \tReceived unexpected error:\r\n \t \tunexpected stats length\r\n \t \tgithub.com/containerd/containerd/integration.TestContainerListStatsWithIdSandboxIdFilter.func4\r\n \t \t\t/home/runner/work/containerd/containerd/integration/container_stats_test.go:371\r\n \t \tgithub.com/containerd/containerd/integration.Eventually\r\n \t \t\t/home/runner/work/containerd/containerd/integration/main_test.go:324\r\n \t \tgithub.com/containerd/containerd/integration.TestContainerListStatsWithIdSandboxIdFilter\r\n \t \t\t/home/runner/work/containerd/containerd/integration/container_stats_test.go:364\r\n \t \ttesting.tRunner\r\n \t \t\t/opt/hostedtoolcache/go/1.17.3/x64/src/testing/testing.go:1259\r\n \t \truntime.goexit\r\n \t \t\t/opt/hostedtoolcache/go/1.17.3/x64/src/runtime/asm_amd64.s:1581\r\n \tTest: \tTestContainerListStatsWithIdSandboxIdFilter", + "time=\"2021-11-21T03:34:25.856933279Z\" level=error msg=\"unable to migrate tasks\" error=\"failed to remove `state` dir: remove /run/containerd-test/io.containerd.runtime-shim.v2.shim: directory not empty\"" + ] + } + }, + "metadata": { + "tags": [ + "containerd", + "graduated", + "runtime", + "impact-changelog" + ], + "category": "runtime", + "cncfProjects": [ + "containerd" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/containerd/containerd/pull/6150", + "sourceRepo": "containerd/containerd", + "reactions": 4, + "comments": 22 + }, + "security": { + "scannedAt": "2026-02-27T17:44:58.886Z", + 
"scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/containerd/containerd-6702-cri-improve-image-pulling-performance.json b/solutions/cncf-generated/containerd/containerd-6702-cri-improve-image-pulling-performance.json new file mode 100644 index 00000000..8b5ab76c --- /dev/null +++ b/solutions/cncf-generated/containerd/containerd-6702-cri-improve-image-pulling-performance.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:46.270Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "containerd: CRI: improve image pulling performance", + "description": "### Background:\n\nWith current design, the content backend uses key-lock for long-lived\nwrite transaction. If the content reference has been marked for write\ntransaction, the other requestes on the same reference will fail fast with\nunavailable error. Since the metadata plugin is based on boltbd which\nonly supports single-writer, the content backend can't block or handle\nthe request too long. It requires the client to handle retry by itself,\nlike OpenWriter - backoff retry helper. But the maximum retry interval\ncan be up to 2 seconds. If there are several concurrent requestes fo the\nsame image, the waiters maybe wakeup at the same time and there is only\none waiter can continue. A lot of waiters will get into sleep and we will\ntake long time to finish all the pulling jobs and be worse if the image\nhas many more layers, which mentioned in issue #4937.\n\nAfter fetching, containerd.Pull API allows several hanlers to commit\nsame ChainID snapshotter but only one can be done successfully. Since", + "type": "analyze", + "status": "completed", + "resolution": { + "summary": "With the way things work right now, there's nothing stopping a parallel unpack of the exact\nsame layer to a snapshot. 
The first one to get committed will live on while the other(s) get garbage collected\nso in the end things work out, but regardless of this it's wasted work. The real issue is that while unpack\nshould be pretty cheap on Linux, the opposite is true for the Windows and lcow formats. Kicking off\n10 parallel pulls of the same image brings my 6 core machine to a halt and pushes 100% cpu utilization.\nWhat all of this ends up causing is exponentially slower parallel pull times for images that either share layers,\nor just pulling the same image.\n\nI'm not sure if this is a \"sound\" way to approach this, or if there's possibly a much easier way to go about this change. I tried\nto model it in a way that wouldn't disrupt things from a clients perspective, so the logic lives in the metadata snapshotter\nlayer. The gist of this change is if a new RemoteContext option is specified, the snapshotter now keeps track of what active\nsnapshots are \"in progress\". Any other snapshots that call Prepare with the same key as a snapshot that is already in progress\nwill now simply wait for one of two things to occur.\n1. The first active snapshot it's waiting on gets removed via `Remove` (so it was never committed). For this case there\nwas likely an error during setup for the first snapshot/unpack, so any waiters continue as normal for this branch and create a new snapshot.\n2. First active s", + "steps": [ + "The first active snapshot it's waiting on gets removed via `Remove` (so it was never committed). For this case there", + "First active s" + ], + "codeSnippets": [ + "Both content backoff retry and unnecessary unpack impacts the performance.\r\n\r\n### Solution:\r\n\r\nIntroduced the duplicate suppression in fetch and unpack context. The\r\ndeplicate suppression uses key-mutex and single-waiter-notify to support\r\nsingleflight. 
The caller can use the duplicate suppression in different\r\nPullImage handlers so that we can avoid unnecessary unpack and spin-lock\r\nin OpenWriter.\r\n\r\n### Test Result:\r\n\r\n#### Before enhancement:", + "#### With this enhancement:", + "### Test Script:\r\n\r\nlocalhost:5000/{redis|golang}:latest is equal to\r\ndocker.io/library/{redis|golang}:latest. The image is hold in local registry\r\nservice by `docker run -d -p 5000:5000 --name registry registry:2`." + ] + } + }, + "metadata": { + "tags": [ + "containerd", + "graduated", + "runtime", + "area-cri", + "kind-performance" + ], + "category": "runtime", + "cncfProjects": [ + "containerd" + ], + "targetResourceKinds": [ + "Service", + "Job" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/containerd/containerd/pull/6702", + "sourceRepo": "containerd/containerd", + "reactions": 18, + "comments": 24 + }, + "security": { + "scannedAt": "2026-02-27T17:44:46.270Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/containerd/containerd-7313-add-metrics-for-image-pulling-error-in-progress-count-thoughput.json b/solutions/cncf-generated/containerd/containerd-7313-add-metrics-for-image-pulling-error-in-progress-count-thoughput.json new file mode 100644 index 00000000..a2162c24 --- /dev/null +++ b/solutions/cncf-generated/containerd/containerd-7313-add-metrics-for-image-pulling-error-in-progress-count-thoughput.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:54.900Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "containerd: add metrics for image pulling: error; in progress count; thoughput ", + "description": "/cc @cpuguy83 \nFixes #7241\n\nThis pr tries to add some metrics for image pulling.\n1. Guage: in-progress pulls counts\n2. 
Counter: image pull error counts\n- Should this count by registry or image name?\n- Should we add the failure reason as a metric label?\n3. Histogram: throughout(image pull time for 1 MiB) group by the registry\nimage throughout may be related to CPU/disk io/network.\n\nFurther options to add for image pulling:\n1. max_concurrent_downloads: default 3. This may be related to the image pulling speed. https://github.com/containerd/containerd/pull/2920\n2. Histogram: [Need Discussion] image-pull time group by the registry(maybe not proper as size may be different.)\n3. Histogram: image size\n4. image_pull_progress_timeout of contained configuration(default 1m0s)\n\nTODO(some unfinished work in this PR)\n- [x] add repo as label\n- [ ] the current thoughtput is not taking `already exist layers or image` into account. (This means the metrics will show a quick pulling for existing images", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "```\n * b8de211b \"renames; image pull count all and add registry/status as label\" ... FAIL\n - PASS - commit does not have any whitespace errors\n - FAIL - does not have a valid DCO\n - PASS - commit subject is 72 characters or less! *yay*\n```\n\nCommit b8de211b is missing a the `Signed-off-by` line, which is causing the project checks in CI to fail. 
Please sign off on this commit as per the [contribution guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md#sign-your-work).", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "# HELP containerd_cri_image_pulling_error_total error count of image pulling by image name\r\n# TYPE containerd_cri_image_pulling_error_total counter\r\ncontainerd_cri_image_pulling_error_total{image_name=\"daocloud.io/centos:7.3\"} 1\r\ncontainerd_cri_image_pulling_error_total{image_name=\"daocloud.io/centos:8\"} 1\r\ncontainerd_cri_image_pulling_error_total{image_name=\"daocloud.io/daocloud/dce-engine:4.0.8\"} 1\r\ncontainerd_cri_image_pulling_error_total{image_name=\"daocloud.io/daocloud/sprint-boot\"} 1\r\ncontainerd_cri_image_pulling_error_total{image_name=\"daocloud.io/ubuntu1\"} 1\r\n# HELP containerd_cri_image_pulling_in_progress_total in progress pulls\r\n# TYPE containerd_cri_image_pulling_in_progress_total gauge\r\ncontainerd_cri_image_pulling_in_progress_total 1\r\n# HELP containerd_cri_image_pulling_thoughtput_seconds image pulling duration for 1MiB\r\n# TYPE containerd_cri_image_pulling_thoughtput_seconds histogram\r\ncontainerd_cri_image_pulling_thoughtput_seconds_bucket{registry=\"daocloud.io\",le=\"0.005\"} 1\r\ncontainerd_cri_image_pulling_thoughtput_seconds_bucket{registry=\"daocloud.io\",le=\"0.01\"} 1\r\ncontainerd_cri_image_pulling_thoughtput_seconds_bucket{registry=\"daocloud.io\",le=\"0.025\"} 1\r\ncontainerd_cri_image_pulling_thoughtput_seconds_bucket{registry=\"daocloud.io\",le=\"0.05\"} 1\r\ncontainerd_cri_image_pulling_thoughtput_seconds_bucket{registry=\"daocloud.io\",le=\"0.1\"} 1\r\ncontainerd_cri_image_pulling_thoughtput_seconds_bucket{registry=\"daocloud.io\",le=\"0.25\"} 1\r\ncontainerd_cri_image_pulling_thoughtput_seconds_bucket{registry=\"daocloud.io\",le=\"0.5\"} 
1\r\ncontainerd_cri_image_pulling_thoughtput_seconds_bucket{registry=\"daocloud.io\",le=\"1\"} 2\r\ncontainerd_cri_image_pulling_thoughtput_seconds_bucket{registry=\"daocloud.io\",le=\"2.5\"} 2\r\ncontainerd_cri_image_pulling_thoughtput_seconds_bucket{registry=\"daocloud.io\",le=\"5\"} 2\r\ncontainerd_cri_image_pulling_thoughtput_seconds_bucket{registry=\"daocloud.io\",le=\"10\"} 2\r\ncontainerd_cri_image_pulling_thoughtput_seconds_bucket{registry=\"daocloud.io\",le=\"+Inf\"} 2\r\ncontainerd_cri_image_pulling_thoughtput_seconds_sum{registry=\"daocloud.io\"} 1\r\ncontainerd_cri_image_pulling_thoughtput_seconds_count{registry=\"daocloud.io\"} 2", + "* b8de211b \"renames; image pull count all and add registry/status as label\" ... FAIL\r\n - PASS - commit does not have any whitespace errors\r\n - FAIL - does not have a valid DCO\r\n - PASS - commit subject is 72 characters or less! *yay*", + "critest version: 1.25.0\r\nYou're using deprecated Ginkgo functionality:\r\n=============================================\r\n Support for custom reporters has been removed in V2. Please read the documentation linked to below for Ginkgo's new behavior and for a migration path:\r\n Learn more at: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters\r\n Measure is deprecated and will be removed in Ginkgo V2. 
Please migrate to gomega/gmeasure.\r\n Learn more at: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-measure\r\n github.com/kubernetes-sigs/cri-tools/pkg/benchmark/pod_container.go:60\r\n\r\nTo silence deprecations that can be silenced set the following environment variable:\r\n ACK_GINKGO_DEPRECATIONS=2.1.4\r\n\r\n--- FAIL: TestCRISuite (88.03s)\r\nFAIL" + ] + } + }, + "metadata": { + "tags": [ + "containerd", + "graduated", + "runtime", + "area-cri", + "needs-ok-to-test" + ], + "category": "runtime", + "cncfProjects": [ + "containerd" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/containerd/containerd/pull/7313", + "sourceRepo": "containerd/containerd", + "reactions": 4, + "comments": 23 + }, + "security": { + "scannedAt": "2026-02-27T17:44:54.900Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/containerd/containerd-7985-pushwriter-correctly-propagate-errors.json b/solutions/cncf-generated/containerd/containerd-7985-pushwriter-correctly-propagate-errors.json new file mode 100644 index 00000000..75f011e0 --- /dev/null +++ b/solutions/cncf-generated/containerd/containerd-7985-pushwriter-correctly-propagate-errors.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:52.150Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "containerd: pushWriter: correctly propagate errors", + "description": ":bug: Fixes https://github.com/containerd/containerd/issues/7972\n\nIn the refactor from #6995, the error handling was substantially reworked, and changed the types of errors returned - this caused issues in retry logic downstream in BuildKit (see https://github.com/docker/build-push-action/issues/761). 
BuildKit uses this error result from this function [to determine whether to retry the push or not](https://github.com/jedevc/buildkit/blob/40bc1b316f6ce2be69ed522dc6d531ef1d3323e8/util/resolver/retryhandler/retry.go#L54-L76).\n\nNotably, in the case of a network error, instead of propagating the error through to return from `pushWriter.Write` (as previously), it would be propagated through to `pushWriter.Commit` - however, this is too late, since we've already closed the `io.Pipe` by the time we would have reached this function. Therefore, we get the generic error message `\"io: read/write on closed pipe\"` for *every network error* - this seems to be the issue in https://github.com/container", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "- backport of https://github.com/containerd/containerd/pull/7985\n- fixes https://github.com/containerd/containerd/issues/7972\n- relates to https://github.com/containerd/containerd/pull/6995\n- relates to https://github.com/docker/build-push-action/issues/761\n\nIn the refactor from 926b9c72f61b5be6bf8d952512f1d0932fbaf898, the error handling was substantially reworked, and changed the types of errors returned.\n\nNotably, in the case of a network error, instead of propogating the error through to return from pushWriter.Write (as previously), it would be propagated through to pushWriter.Commit - however, this is too late, since we've already closed the io.Pipe by the time we would have reached this function. 
Therefore, we get the generic error message \"io: read/write on closed pipe\" for *every network error*.\n\nThis patch corrects this behavior to ensure that the correct error object is always returned as early as possible, by checking the error result after writing and detecting a closed pipe.\n\nAdditionally, we do some additional hardening - specifically we prevent falling through when resetting the content or detecting errors, and update the tests to explicitly check for the ErrReset message.\n\n(cherry picked from commit 9f6058d029ebffdcd6a8a954143cd4070f62f720)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "containerd", + "graduated", + "runtime", + "ok-to-test", + "cherry-picked-1-5-x", + "cherry-picked-1-6-x" + ], + "category": "runtime", + "cncfProjects": [ + "containerd" + ], + "targetResourceKinds": [ + "Job" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/containerd/containerd/pull/7985", + "sourceRepo": "containerd/containerd", + "reactions": 5, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:44:52.150Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/containerd/containerd-8287-add-support-for-userns-in-stateless-and-stateful-pods-with-idmap.json b/solutions/cncf-generated/containerd/containerd-8287-add-support-for-userns-in-stateless-and-stateful-pods-with-idmap.json new file mode 100644 index 00000000..c43fed1a --- /dev/null +++ b/solutions/cncf-generated/containerd/containerd-8287-add-support-for-userns-in-stateless-and-stateful-pods-with-idmap.json @@ -0,0 +1,54 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:02.685Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "containerd: Add support for userns in stateless and stateful pods with idmap mounts 
(KEP-127, k8s >= 1.27)", + "description": "This adds support in containerd for k8s stateless and stateful pods with user namespaces as implemented in k8s >= 1.27. Kubernetes 1.28 added stateful pod support, but no other changes are needed in containerd, we just use idmap mounts for all volumes (stateless, like configmaps, or stateful, like hostPath volumes).\n\nWe have some requirements:\n * The filesystems should support idmap mounts. The most late adition was tmpfs, that we merged support in Linux 6.3 for idmap mounts, so in practice you will need Linux 6.3 for most stateless pods to work with userns.\n * The OCI runtime needs to support idmap mounts too.\n\nThis requires runc with this PR applied to support idmap mounts: https://github.com/opencontainers/runc/pull/3717. This is expected to be part of runc 1.2 (yet to be released). I'm changing the runc version to that commit, we can update it again when this is included in a release.\n\nI also check if idmap mounts are used, then if the runc version supports that, to avoid silently ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "\n\n

Hi,
this is this is initial support of idmapped mount points in containerd. The original PR was published by @mauriciovasquezbernal here \n\n1. `Credential Client` Implementation `CredentialHelper` interface. \n2. When call `NewOCIRegistry` this method, use `NewProxyCredentialsClient` to create a `CredentialHelper` instance.\n3. When `Transfer Server` callback to `CredentialHelper ` method, `Credential Client` can call `Credential Server` to get current host credential.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Hi @lengrongfu. Thanks for your PR.\n\nI'm waiting for a [containerd](https://github.com/orgs/containerd/people) member to verify that this patch is reasonable to test. If it is, they should reply with `/ok-to-test` on its own line. Until that is done, I will not automatically test new commits in this PR, but the usual testing commands by org members will still work. Regular contributors should [join the org](https://github.com/containerd/project/blob/main/MAINTAINERS) to skip this step.\n\nOnce the patch is verified, the new status will be reflected by the `ok-to-test` label.\n\nI understand the commands that are listed [here](https://go.k8s.io/bot-commands?repo=containerd%2Fcontainerd).\n\n

\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n
", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "containerd", + "graduated", + "runtime", + "needs-ok-to-test", + "stale", + "needs-rebase", + "size-xxl", + "do-not-merge-work-in-progress" + ], + "category": "runtime", + "cncfProjects": [ + "containerd" + ], + "targetResourceKinds": [ + "Service", + "Secret" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/containerd/containerd/pull/9872", + "sourceRepo": "containerd/containerd", + "reactions": 6, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:44:50.497Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/continuous-optimization/continuous-optimization-10-added-node-chaos-scenarios.json b/solutions/cncf-generated/continuous-optimization/continuous-optimization-10-added-node-chaos-scenarios.json new file mode 100644 index 00000000..8fc4e382 --- /dev/null +++ b/solutions/cncf-generated/continuous-optimization/continuous-optimization-10-added-node-chaos-scenarios.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:10.524Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "continuous-optimization: Added node chaos scenarios", + "description": "This commit:\n- Adds a node scenario to stop and start an instance\n- Adds a node scenario to terminate an instance\n- Adds a node scenario to reboot an instance\n- Adds a node scenario to stop the kubelet\n- Adds a node scenario to crash the node\n\nFixes: #8", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This covers the stop kubelet and node crash scenarios Mike mentioned in issue: https://github.com/openshift-scale/kraken/issues/8\n\nI based a lot of my more general set up of the node kill scenario yaml file from the below and this 
could be combined at some point https://github.com/openshift-scale/kraken/pull/10/files \n\nThese 2 scenarios are not cloud specific. \nThis is a first pass of stopping kubelet. I was hoping to add the ability to stop, wait, and then restart of kubelet but I was not able to get it to work properly.\nI also had to find a separate command then the one Mike mentioned for the fork bomb scenario.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Try invoking oc debug node/ip-10-0-203-2.us-east-2.compute.internal -- chroot /host dd if=/dev/urandom of=/proc/sysrq-trigger\r\n2020-06-23 11:01:31,957 [INFO] Scenario: {'node_scenarios': [{'name': 'Fork bomb the node', 'actions': ['node_crash'], 'label_selector': 'node-role.kubernetes.io/worker', 'instance_kill_count': 1, 'timeout': 20, 'cloud_type': 'aws'}]} has been successfully injected!\r\n2020-06-23 11:01:31,957 [INFO] Waiting for the specified duration: 60", + "(venv3) prubenda@prubenda-mac kraken % oc get nodes\r\nNAME STATUS ROLES AGE VERSION\r\nip-10-0-132-58.us-east-2.compute.internal Ready worker 125m v1.18.3+91d0edd\r\nip-10-0-138-72.us-east-2.compute.internal Ready master 136m v1.18.3+91d0edd\r\nip-10-0-176-148.us-east-2.compute.internal Ready master 136m v1.18.3+91d0edd\r\nip-10-0-183-154.us-east-2.compute.internal Ready worker 126m v1.18.3+91d0edd\r\nip-10-0-203-2.us-east-2.compute.internal NotReady worker 126m v1.18.3+91d0edd\r\nip-10-0-210-102.us-east-2.compute.internal Ready master 135m v1.18.3+91d0edd" + ] + } + }, + "metadata": { + "tags": [ + "continuous-optimization", + "sandbox", + "orchestration" + ], + "category": "troubleshooting", + "cncfProjects": [ + "continuous-optimization" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/krkn-chaos/krkn/pull/10", + "sourceRepo": "krkn-chaos/krkn", + "reactions": 1, + "comments": 19 + }, + "security": { + 
"scannedAt": "2026-02-27T17:47:10.524Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/contour/contour-2890-internal-add-httproxy-cors-support.json b/solutions/cncf-generated/contour/contour-2890-internal-add-httproxy-cors-support.json new file mode 100644 index 00000000..fb718fd6 --- /dev/null +++ b/solutions/cncf-generated/contour/contour-2890-internal-add-httproxy-cors-support.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:25.602Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "contour: internal: add HTTProxy CORS support", + "description": "Closes #437", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@aberasarte I'll take another look through this afternoon; thanks for the updates!", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "contour", + "incubating", + "networking", + "release-note" + ], + "category": "networking", + "cncfProjects": [ + "contour" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/projectcontour/contour/pull/2890", + "sourceRepo": "projectcontour/contour", + "reactions": 3, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:43:25.602Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/contour/contour-5008-httpproxy-add-support-for-ip-based-filtering.json b/solutions/cncf-generated/contour/contour-5008-httpproxy-add-support-for-ip-based-filtering.json new file mode 100644 index 00000000..5dde551c --- /dev/null +++ b/solutions/cncf-generated/contour/contour-5008-httpproxy-add-support-for-ip-based-filtering.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": 
"2026-02-27T17:43:23.345Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "contour: httpproxy: Add support for ip-based filtering", + "description": "Configures Envoy's `envoy.filters.http.rbac` per route via HTTPProxy.\n\nSee #4990 for design \n\nFixes #3693", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "I took a stab at a proposal to address https://github.com/projectcontour/contour/issues/3693\n\nI haven't looked deeply at the envoy filters available for this, but I wrote down what looked like would work based on the docs.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "contour", + "incubating", + "networking", + "release-note-major" + ], + "category": "networking", + "cncfProjects": [ + "contour" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/projectcontour/contour/pull/5008", + "sourceRepo": "projectcontour/contour", + "reactions": 7, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:43:23.345Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/contour/contour-5043-add-tracing-support.json b/solutions/cncf-generated/contour/contour-5043-add-tracing-support.json new file mode 100644 index 00000000..ce69ab88 --- /dev/null +++ b/solutions/cncf-generated/contour/contour-5043-add-tracing-support.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:20.255Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "contour: Add tracing support", + "description": "Add support for exporting tracing data to opentelemetry\nclose #399 \n\nSigned-off-by: yy [yang.yang@daocloud.io]", + "type": "troubleshoot", + "status": "completed", + "resolution": { + 
"summary": "@skriss any suggestions?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "| [Impacted Files](https://codecov.io/gh/projectcontour/contour/pull/5043?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectcontour) | Coverage Δ | |\n|---|---|---|\n| [cmd/contour/serve.go](https://codecov.io/gh/projectcontour/contour/pull/5043?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectcontour#diff-Y21kL2NvbnRvdXIvc2VydmUuZ28=) | `20.25% <0.00%> (-1.53%)` | :arrow_down: |\n| [internal/xdscache/v3/listener.go](https://codecov.io/gh/projectcontour/contour/pull/5043?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectcontour#diff-aW50ZXJuYWwveGRzY2FjaGUvdjMvbGlzdGVuZXIuZ28=) | `89.75% <66.66%> (-2.38%)` | :arrow_down: |\n| [pkg/config/parameters.go](https://codecov.io/gh/projectcontour/contour/pull/5043?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectcontour#diff-cGtnL2NvbmZpZy9wYXJhbWV0ZXJzLmdv) | `87.37% <90.62%> (+0.39%)` | :arrow_up: |\n| [internal/envoy/v3/tracing.go](https://codecov.io/gh/projectcontour/contour/pull/5043?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectcontour#diff-aW50ZXJuYWwvZW52b3kvdjMvdHJhY2luZy5nbw==) | `93.33% <93.33%> (ø)` | |\n| [cmd/contour/servecontext.go](https://codecov.io/gh/projectcontour/contour/pull/5043?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectcontour#diff-Y21kL2NvbnRvdXIvc2VydmVjb250ZXh0Lmdv) | `83.02% <100.00%> (+0.95%)` | :arrow_up: |\n| 
[internal/envoy/v3/listener.go](https://codecov.io/gh/projectcontour/contour/pull/5043?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectcontour#diff-aW50ZXJuYWwvZW52b3kvdjMvbGlzdGVuZXIuZ28=) | `98.42% <100.00%> (+0.01%)` | :arrow_up: |\n\n... and [4 files with indirect coverage changes](https://codecov.io/gh/projectcontour/contour/pull/5043/indirect-changes?src=pr&el=tree-more&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectcontour)\n\n
\n@skriss any suggestions?\r\n\nWhat is remaining to be done before this PR can be merged?\n@yangyy93 it looks like a changelog file is missing:", + "OMG is this happening!? Contour is wonderful except for the tracing support, it’s got everything I need. Thank you for this.\nFYI here's a very basic config that folks can use for testing:\r\n\r\nInstall otel operator:", + "Install an otel collector instance, with verbose logging exporter enabled:" + ] + } + }, + "metadata": { + "tags": [ + "contour", + "incubating", + "networking", + "release-note-major" + ], + "category": "networking", + "cncfProjects": [ + "contour" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/projectcontour/contour/pull/5043", + "sourceRepo": "projectcontour/contour", + "reactions": 8, + "comments": 22 + }, + "security": { + "scannedAt": "2026-02-27T17:43:20.256Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/contour/contour-5101-internal-provisioner-expose-the-metrics-port-for-envoy.json b/solutions/cncf-generated/contour/contour-5101-internal-provisioner-expose-the-metrics-port-for-envoy.json new file mode 100644 index 00000000..91df1080 --- /dev/null +++ b/solutions/cncf-generated/contour/contour-5101-internal-provisioner-expose-the-metrics-port-for-envoy.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:27.266Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "contour: internal/provisioner: expose the metrics port for envoy", + "description": "Expose the default/changed metric port for envoy\n\nCloses #5232.\n\nSigned-off-by: Gang Liu [gang.liu@daocloud.io](mailto:gang.liu@daocloud.io)", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This is a fix for #5232", + "steps": [ + "Review the issue discussion for context", + "Apply the 
fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "contour", + "incubating", + "networking", + "release-note-small" + ], + "category": "networking", + "cncfProjects": [ + "contour" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/projectcontour/contour/pull/5101", + "sourceRepo": "projectcontour/contour", + "reactions": 3, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:43:27.266Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/contour/contour-5672-wait-for-cache-sync-and-dag-build-before-starting-xds-server.json b/solutions/cncf-generated/contour/contour-5672-wait-for-cache-sync-and-dag-build-before-starting-xds-server.json new file mode 100644 index 00000000..669a07b8 --- /dev/null +++ b/solutions/cncf-generated/contour/contour-5672-wait-for-cache-sync-and-dag-build-before-starting-xds-server.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:21.584Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "contour: wait for cache sync and DAG build before starting xDS server", + "description": "Closes #5550.\nCloses #1280.\n\nI've removed the `x.mgr.GetCache().WaitForCacheSync()` call as it's implicitly handled in the `mgr.Start()` flow.\n\nAs a TLDR, the PR prevents starting the XDS server and building the DAG until the cache is synced with the initial list of k8s objects.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "I have one more observation, but otherwise the change looks good to me :+1:\n\nAssume we have a lot of resources that require status update, let's say 2000 `HTTPProxies` with status marked as `invalid`, and the status now needs to be updated `valid` at once, because the error condition was fixed during Contour was down. 
\n\nThere seems to be a chance now, that Contour will NOT start XDS server before the statuses have been pushed to Kubernetes. It happens like following:\n\n_Sometimes_ Contour manages to acquire lease before client-go sync has finalised. `StatusUpdateHandler` will get started and processing status updates is enabled:\n\n```\ntime=\"2023-08-25T14:16:15+03:00\" level=info msg=\"attempting to acquire leader lease projectcontour/leader-elect...\\n\" caller=\"leaderelection.go:245\" context=kubernetes\ntime=\"2023-08-25T14:16:15+03:00\" level=info msg=\"successfully acquired lease projectcontour/leader-elect\\n\" caller=\"leaderelection.go:255\" context=kubernetes\ntime=\"2023-08-25T14:16:15+03:00\" level=info msg=\"started status update handler\" context=StatusUpdateHandler\ntime=\"2023-08-25T14:16:15+03:00\" level=info msg=\"received a new address for status.loadBalancer\" context=loadBalancerStatusWriter loadbalancer-address=\ntime=\"2023-08-25T14:16:15+03:00\" level=info msg=\"performing delayed update\" context=contourEventHandler last_update=239.3048ms outstanding=3984\n```\n\nIn this case the processing of status updates happens within `rebuildDAG()` before we have set `e.initialDagBuilt = true`. 
D", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "| [Files](https://app.codecov.io/gh/projectcontour/contour/pull/5672?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectcontour) | Coverage Δ | |\n|---|---|---|\n| [internal/featuretests/v3/featuretests.go](https://app.codecov.io/gh/projectcontour/contour/pull/5672?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectcontour#diff-aW50ZXJuYWwvZmVhdHVyZXRlc3RzL3YzL2ZlYXR1cmV0ZXN0cy5nbw==) | `86.60% <100.00%> (-0.05%)` | :arrow_down: |\n| [internal/contour/handler.go](https://app.codecov.io/gh/projectcontour/contour/pull/5672?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectcontour#diff-aW50ZXJuYWwvY29udG91ci9oYW5kbGVyLmdv) | `82.16% <61.29%> (-6.07%)` | :arrow_down: |\n| [cmd/contour/serve.go](https://app.codecov.io/gh/projectcontour/contour/pull/5672?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectcontour#diff-Y21kL2NvbnRvdXIvc2VydmUuZ28=) | `19.82% <0.00%> (-0.31%)` | :arrow_down: |\n\n... and [1 file with indirect coverage changes](https://app.codecov.io/gh/projectcontour/contour/pull/5672/indirect-changes?src=pr&el=tree-more&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectcontour)\n\n
\nThere's actually a better way to implement this. The server should only wait for the first DAG build. The handler itself then would ensure the first DAG build is done after the cache sync.\r\n\r\nI'll push a commit to implement this.\n> Nice work @therealak12!\r\n> \r\n> So, my understanding from this is that `WaitForCacheSync()` might have worked - if we did NOT process the events asynchronously. Since client-go will be unaware of our background processing it returns while we still process our own queue. For this kind of situations, client-go offers utility [`SingleFileTracker`](https://github.com/kubernetes/client-go/blob/e8815ff156658df0fd9284683c5fdcda51a681dc/tools/cache/synctrack/synctrack.go#L80), where user signals their async processing by using `Start()` and `Finished()` call-pair per resource in initial list, and `HasSynced()` which additionally covers the initial queue inside client.go. Is that correct?\r\n> \r\n> I added some small questions inline as well.\r\n> \r\n> The change makes sense to me and seems to work on my machine as well 🙂 👍\r\n> \r\n> Would you add a changelog entry as well?\r\n\r\nThanks for your review and comments. 
I try to explain what happens exactly:\r\n\r\n- The manager's `WaitForCacheSync` waits until the initial list of Kubernetes objects is delivered to the informers.\r\n- We register our handlers to these informers and ignore the returned `HasSynced` methods.\r\n- Although the full list of initial objects exists in the informers, they're not necessarily handled by the contour handler and thus they don't necessarily exist in the contour's internal cache.\r\n- The DAG rebuild goroutine starts rebuilding DAG and updating Ingress objects *based on its own cache*\r\n\r\nThis PR waits until all of the `HasSynced` methods that are received when registering handlers, return true, and then starts the DAG rebuild process.\r\n\r\nThose `HasSynced` methods would return true when `OnAdd` method of the handler is called for all of the objects in the initial list. If we only rely on these `HasSynced` methods, we may start DAG rebuild process before putting the last object in the cache! (The last object because we use an unbuffered channel and thus OnAdd is blocked until the current object is read by the goroutine.)\r\n\r\nThis is why I've used the `SingleFileTracker`. It's decremented each time the `OnAdd` is called for an object of the initial list and incremented when its handling is done. So if syncTracker.HasSynced returns true, it means we are not processing any objects at that moment.\nI have one more observation, but otherwise the change looks good to me :+1:\r\n\r\nAssume we have a lot of resources that require status update, let's say 2000 `HTTPProxies` with status marked as `invalid`, and the status now needs to be updated `valid` at once, because the error condition was fixed during Contour was down. \r\n\r\nThere seems to be a chance now, that Contour will NOT start XDS server before the statuses have been pushed to Kubernetes. It happens like following:\r\n\r\n_Sometimes_ Contour manages to acquire lease before client-go sync has finalised. 
`StatusUpdateHandler` will get started and processing status updates is enabled:", + "In this case the processing of status updates happens within `rebuildDAG()` before we have set `e.initialDagBuilt = true`. Due to the default client rate limits (adjustable by `--kubernetes-client-qps` and `--kubernetes-client-burst`) the XDS server will be down for quite a while, depending on how many statuses there are to update. \r\n\r\nIn my test it took 7 minutes to update 2000 `HTTPProxies` until I got this:", + "Since the XDS server does not depend on statuses, I think it would make sense to set `initialDagBuilt = True` to start XDS server **before** looping and sending the status updates: https://github.com/projectcontour/contour/blob/68bafab3d1bcc6fd1436b579e11ae357f966d7bd/internal/contour/handler.go#L243-L250\r\n\r\nCc @sunjayBhatia, @skriss \n> I have one more observation, but otherwise the change looks good to me 👍\r\n> \r\n> Assume we have a lot of resources that require status update, let's say 2000 `HTTPProxies` with status marked as `invalid`, and the status now needs to be updated `valid` at once, because the error condition was fixed during Contour was down.\r\n> \r\n> There seems to be a chance now, that Contour will NOT start XDS server before the statuses have been pushed to Kubernetes. It happens like following:\r\n> \r\n> _Sometimes_ Contour manages to acquire lease before client-go sync has finalised. 
`StatusUpdateHandler` will get started and processing status updates is enabled:\r\n> \r\n>" + ] + } + }, + "metadata": { + "tags": [ + "contour", + "incubating", + "networking", + "release-note-minor" + ], + "category": "networking", + "cncfProjects": [ + "contour" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/projectcontour/contour/pull/5672", + "sourceRepo": "projectcontour/contour", + "reactions": 8, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:43:21.584Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/contour/contour-5802-add-per-httpproxy-http-version-support.json b/solutions/cncf-generated/contour/contour-5802-add-per-httpproxy-http-version-support.json new file mode 100644 index 00000000..582a233f --- /dev/null +++ b/solutions/cncf-generated/contour/contour-5802-add-per-httpproxy-http-version-support.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:24.487Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "contour: add per-httpproxy http-version support", + "description": "This PR adds a new field to HTTPProxy spec which specifies the HTTP versions to offer for that HTTPProxy. It's used only when `spec.tls` is set and `spec.tcpproxy` is not.\n\nA critical use case for the field is when we're serving multiple HTTPProxies where a subset of them use the same wildcard certificate. 
We can disable http/2 for those utilizing this field while keeping it enabled for others.\n\nCloses #5822", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "The Contour project currently lacks enough contributors to adequately respond to all PRs.\n\nThis bot triages PRs according to the following rules:\n\n- After 14d of inactivity, lifecycle/stale is applied\n- After 30d of inactivity since lifecycle/stale was applied, the PR is closed\n\nYou can:\n\n- Mark this PR as fresh by commenting or pushing a commit\n- Close this PR\n- Offer to help out with triage\n\nPlease send feedback to the #contour channel in the Kubernetes Slack", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "contour", + "incubating", + "networking", + "lifecycle-stale", + "release-note-small" + ], + "category": "networking", + "cncfProjects": [ + "contour" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/projectcontour/contour/pull/5802", + "sourceRepo": "projectcontour/contour", + "reactions": 4, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:43:24.487Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/copa/copa-1235-feat-bulk-image-patching.json b/solutions/cncf-generated/copa/copa-1235-feat-bulk-image-patching.json new file mode 100644 index 00000000..50ca64a6 --- /dev/null +++ b/solutions/cncf-generated/copa/copa-1235-feat-bulk-image-patching.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:12.871Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "copa: feat: Bulk Image Patching", + "description": "This PR introduces bulk patching for copa that allows user to patch multiple container images by passing a single YAML 
config file path in the `--config` flag.\n\neg: `copa patch --config path/config.yaml`\n\nThis is the initial implementation which focuses on the \"comprehensive/update-all\" patching foundational for report based bulk image patching for future prospects.\n\nThe design doc which this is implemented on: [Bulk Image Patching](https://docs.google.com/document/d/1HviV0eZGTQpwaZZ9qDKFJAFtZ0oIxVnWc_QviNqfdHg/edit?usp=sharing)\n\nCloses #631", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@amanycodes can you add an integration test too", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "
\n\n[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/project-copacetic/copacetic/pull/1235?dropdown=coverage&src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=project-copacetic). \n:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=project-copacetic).\n
:rocket: New features to boost your workflow: \n\n- :snowflake: [Test Analytics](https://docs.codecov.com/docs/test-analytics): Detect flaky tests, report on failures, and find test suite problems.\n- :package: [JS Bundle Analysis](https://docs.codecov.com/docs/javascript-bundle-analysis): Save yourself from yourself by tracking and limiting bundle sizes in JS merges.\n
\n@amanycodes can you add an integration test too\n@sozercan I was working on integration tests. the basic implementation is done. I'll add in more cases to make this robust with updated CI workflow. Thanks!\n@sozercan the integration test is done. The only part that remains is testing multiarch images through the config file. I was having some issues with it's dealing with the manifest in the testcontainer registry (working on it). Please let me know if the workflow is set correctly!\n@amanycodes what error do you see when patching manifests with bulk image patching? can we add to this PR to debug\n@ashnamehrotra There was a panic due to a race condition in the main Patch() function from the error channels, i fixed that and the single arch tests are passing. still getting some error in the multiarch ones. \nI think it's more related to the image but I'm not able to point it out.\nWould love your feedback!\n@amanycodes look like the CI is failing. is this ready for review?\n@sozercan the PR is ready for review, the multi arch testing part was where i had some issues. 
rest the feature is working and unit tests and single arch tests are passing.\n@ashnamehrotra I hope it's good to go now :)\n@amanycodes can we add this to docs?\nFor the failing test, I think you are missing the required tooling:" + ] + } + }, + "metadata": { + "tags": [ + "copa", + "sandbox", + "security" + ], + "category": "security", + "cncfProjects": [ + "copa" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/project-copacetic/copacetic/pull/1235", + "sourceRepo": "project-copacetic/copacetic", + "reactions": 3, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:47:12.871Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/coredns/coredns-2885-plugin-cache-remove-item-autoritative.json b/solutions/cncf-generated/coredns/coredns-2885-plugin-cache-remove-item-autoritative.json new file mode 100644 index 00000000..741c8611 --- /dev/null +++ b/solutions/cncf-generated/coredns/coredns-2885-plugin-cache-remove-item-autoritative.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:10.123Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "coredns: plugin/cache: remove item.Autoritative", + "description": "Confuses clients if not set; remove it.\n\nFixes #2887", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "### 1. Why is this pull request needed and what does it do?\nPlease see https://github.com/coredns/coredns/issues/2586 for more context.\nTLDR; This PR provides a way to disable one of the caches (positive/negative) and not the other. This enables usders to use coredns for positive caching with a ttl, while opting out of negative caching similar to the `no-neg-cache` support in dnsmasq\n\n### 2. 
Which documentation changes (if any) need to be made?\ncache.md should be updated, but will update this PR with the doc changes once I get some feedback.\n\n### 3. Does this introduce a backward incompatible change or deprecation?\nNo\n\nAlso this enables us to have a cache policy like the following:\n```\n cache {\n success 5000 5\n denial 0\n }\n```\nthat would enable success cache but disable denial cache. \n\nfixes #2586", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "cache {\r\n success 5000 5\r\n denial 0\r\n }" + ] + } + }, + "metadata": { + "tags": [ + "coredns", + "graduated", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "coredns" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/coredns/coredns/pull/2885", + "sourceRepo": "coredns/coredns", + "reactions": 1, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:45:10.123Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/coredns/coredns-3516-add-private-dns-support-for-azure-plugin.json b/solutions/cncf-generated/coredns/coredns-3516-add-private-dns-support-for-azure-plugin.json new file mode 100644 index 00000000..025d3ad0 --- /dev/null +++ b/solutions/cncf-generated/coredns/coredns-3516-add-private-dns-support-for-azure-plugin.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:11.117Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "coredns: Add private DNS support for azure plugin", + "description": "### 1. Why is this pull request needed and what does it do?\nFixes a bug in `azure` plugin introduced during refactor\n\n### 2. Which issues (if any) are related?\ncloses https://github.com/coredns/coredns/issues/3113\n\n### 3. 
Which documentation changes (if any) need to be made?\nUpdated the readme\n\n### 4. Does this introduce a backward incompatible change or deprecation?\nYes, changes the syntax for `azure` plugin block", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "[ Quoting in \"Re: [coredns/coredns] Add private D...\" ]\n>```\n> azure resource_group_foo:example.org resource_group_foo:example.private {\n> tenant 123abc-123abc-123abc-123abc\n> client 123abc-123abc-123abc-234xyz\n> subscription 123abc-123abc-123abc-563abc\n> secret mysecret\n> access both\n> }\n>```\n\nyes some like this, I would remove the `access` and just have `private` and `public` as\nkeywords. Thus:\n\n```\n azure resource_group_foo:example.org resource_group_foo:example.private {\n tenant 123abc-123abc-123abc-123abc\n client 123abc-123abc-123abc-234xyz\n subscription 123abc-123abc-123abc-563abc\n secret mysecret\n public\n private\n }\n```\n\nAnd default to `public` or so.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "| [Impacted Files](https://codecov.io/gh/coredns/coredns/pull/3516?src=pr&el=tree) | Coverage Δ | |\n|---|---|---|\n| [plugin/azure/azure.go](https://codecov.io/gh/coredns/coredns/pull/3516/diff?src=pr&el=tree#diff-cGx1Z2luL2F6dXJlL2F6dXJlLmdv) | `10.37% <0%> (-5.45%)` | :arrow_down: |\n| [plugin/azure/setup.go](https://codecov.io/gh/coredns/coredns/pull/3516/diff?src=pr&el=tree#diff-cGx1Z2luL2F6dXJlL3NldHVwLmdv) | `62.79% <61.76%> (-4.82%)` | :arrow_down: |\n| [core/dnsserver/server.go](https://codecov.io/gh/coredns/coredns/pull/3516/diff?src=pr&el=tree#diff-Y29yZS9kbnNzZXJ2ZXIvc2VydmVyLmdv) | `11.03% <0%> (ø)` | :arrow_up: |\n| [core/dnsserver/server\\_https.go](https://codecov.io/gh/coredns/coredns/pull/3516/diff?src=pr&el=tree#diff-Y29yZS9kbnNzZXJ2ZXIvc2VydmVyX2h0dHBzLmdv) | `0% <0%> (ø)` | :arrow_up: |\n| 
[core/dnsserver/register.go](https://codecov.io/gh/coredns/coredns/pull/3516/diff?src=pr&el=tree#diff-Y29yZS9kbnNzZXJ2ZXIvcmVnaXN0ZXIuZ28=) | `23.42% <0%> (ø)` | :arrow_up: |\n| [core/dnsserver/server\\_tls.go](https://codecov.io/gh/coredns/coredns/pull/3516/diff?src=pr&el=tree#diff-Y29yZS9kbnNzZXJ2ZXIvc2VydmVyX3Rscy5nbw==) | `19.35% <0%> (ø)` | :arrow_up: |\n| [core/dnsserver/server\\_grpc.go](https://codecov.io/gh/coredns/coredns/pull/3516/diff?src=pr&el=tree#diff-Y29yZS9kbnNzZXJ2ZXIvc2VydmVyX2dycGMuZ28=) | `8.1% <0%> (ø)` | :arrow_up: |\n| [plugin/forward/health.go](https://codecov.io/gh/coredns/coredns/pull/3516/diff?src=pr&el=tree#diff-cGx1Z2luL2ZvcndhcmQvaGVhbHRoLmdv) | `85.29% <0%> (+2.53%)` | :arrow_up: |\n| [plugin/errors/errors.go](https://codecov.io/gh/coredns/coredns/pull/3516/diff?src=pr&el=tree#diff-cGx1Z2luL2Vycm9ycy9lcnJvcnMuZ28=) | `100% <0%> (+4.76%)` | :arrow_up: |\n| [plugin/forward/setup.go](https://codecov.io/gh/coredns/coredns/pull/3516/diff?src=pr&el=tree#diff-cGx1Z2luL2ZvcndhcmQvc2V0dXAuZ28=) | `63.46% <0%> (+5.07%)` | :arrow_up: |\n| ... and [1 more](https://codecov.io/gh/coredns/coredns/pull/3516/diff?src=pr&el=tree-more) | |\n\n------\n\n[Continue to review full report at Codecov](https://codecov.io/gh/coredns/coredns/pull/3516?src=pr&el=continue).\n> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)\n> `Δ = absolute (impact)`, `ø = not affected`, `? = missing data`\n> Powered by [Codecov](https://codecov.io/gh/coredns/coredns/pull/3516?src=pr&el=footer). Last update [b8e96b6...c400a6f](https://codecov.io/gh/coredns/coredns/pull/3516?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).\n\n[ Quoting in \"Re: [coredns/coredns] Add private D...\" ]\n>", + "yes some like this, I would remove the `access` and just have `private` and `public` as\nkeywords. Thus:", + "And default to `public` or so." 
+ ] + } + }, + "metadata": { + "tags": [ + "coredns", + "graduated", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "coredns" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/coredns/coredns/pull/3516", + "sourceRepo": "coredns/coredns", + "reactions": 1, + "comments": 17 + }, + "security": { + "scannedAt": "2026-02-27T17:45:11.117Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/coredns/coredns-3887-plugin-kubernetes-handle-endpoint-tombstones.json b/solutions/cncf-generated/coredns/coredns-3887-plugin-kubernetes-handle-endpoint-tombstones.json new file mode 100644 index 00000000..4ca984fc --- /dev/null +++ b/solutions/cncf-generated/coredns/coredns-3887-plugin-kubernetes-handle-endpoint-tombstones.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:12.960Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "coredns: plugin/kubernetes: Handle endpoint tombstones", + "description": "### 1. Why is this pull request needed and what does it do?\n\n`ToEndpoints()` conversion was filtering out Endpoint deletion tombstones (where the object in the event is not an `*api.Endpoint`, but rather a `cache.DeletedFinalStateUnknown`). The tombstones are essentially indicators that the Endpoint was deleted, without a containing a full copy of the Endpoints object - just a key value. `ToEndpoints()` was returning a nil `*object.Endpoints` when these were encountered, which caused failures downstream when deleting from the index.\n\nThis fix is allows `ToEndpoints()` to pass the tombstones through, so they can be deleted properly from the `clientState`.\n\n### 2. Which issues (if any) are related?\n\nFixes #3860\n\n### 3. Which documentation changes (if any) need to be made?\n\nnone\n\n### 4. 
Does this introduce a backward incompatible change or deprecation?\n\nno", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "### 1. Why is this pull request needed and what does it do?\n\nAvoid panic and subsequent \"dead\" endpoints informer by checking for nil in the index key func.\nThis is a _workaround_ what I suspect are two upstream bugs in the k8s api libs:\n* that nil endpoints are added to the informer queue in the first place\n* that the panic causes the informer to stop receiving/processing new events.\n\nI verified that this not only avoids the panic, but it also allows the informer to process missed events, and continue processing endpoint changes after the nil endpoint is encountered. IOW, it prevents the informer from dropping dead.\n\n### 2. Which issues (if any) are related?\n\n#3860\n\n### 3. Which documentation changes (if any) need to be made?\n\nnone\n\n### 4. Does this introduce a backward incompatible change or deprecation?\n\nno", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "coredns", + "graduated", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "coredns" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/coredns/coredns/pull/3887", + "sourceRepo": "coredns/coredns", + "reactions": 1, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:45:12.960Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/coredns/coredns-5320-plugin-cache-add-option-to-adjust-servfail-response-cache-ttl.json b/solutions/cncf-generated/coredns/coredns-5320-plugin-cache-add-option-to-adjust-servfail-response-cache-ttl.json new file mode 100644 index 00000000..c6071c50 --- /dev/null +++ 
b/solutions/cncf-generated/coredns/coredns-5320-plugin-cache-add-option-to-adjust-servfail-response-cache-ttl.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:08.707Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "coredns: plugin/cache: Add option to adjust SERVFAIL response cache TTL", + "description": "### 1. Why is this pull request needed and what does it do?\n\nAdd an option to customize SERVFAIL response cache TTL within RFC 2308 allowances.\nSetting a 0 TTL will disable caching of SERVFAIL responses.\n\n### 2. Which issues (if any) are related?\n\ncloses #5074\n\n### 3. Which documentation changes (if any) need to be made?\n\nincluded\n\n### 4. Does this introduce a backward incompatible change or deprecation?\n\nno", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "gobencher flaked with the error:\n\n```\nInitializing: exit status 128:\nfatal: cannot mkdir /home/cuong/go/src/github.com/chrisohaver/coredns-0: No space left on device\n```\n\n Clicking \"re-run\" does not appear to result in a re-run.\n\nClicking \"[View more details on gobencher](https://dashboard.github.orijtech.com/benchmark/5224ac2959cc42b087b1fc643489927e)\" results a mostly blank page.\n\ncc @odeke-em", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Initializing: exit status 128:\r\nfatal: cannot mkdir /home/cuong/go/src/github.com/chrisohaver/coredns-0: No space left on device" + ] + } + }, + "metadata": { + "tags": [ + "coredns", + "graduated", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "coredns" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/coredns/coredns/pull/5320", + "sourceRepo": "coredns/coredns", + "reactions": 3, + "comments": 11 + }, + "security": { + "scannedAt": 
"2026-02-27T17:45:08.707Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/coredns/coredns-5833-plugin-nomad-add-a-nomad-plugin.json b/solutions/cncf-generated/coredns/coredns-5833-plugin-nomad-add-a-nomad-plugin.json new file mode 100644 index 00000000..c2c6d49b --- /dev/null +++ b/solutions/cncf-generated/coredns/coredns-5833-plugin-nomad-add-a-nomad-plugin.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:07.601Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "coredns: plugin/nomad: Add a Nomad plugin", + "description": "This commit introduces a `nomad` plugin which provides a DNS interface for querying Nomad services.\n\n### 1. Why is this pull request needed and what does it do?\n\nThis PR adds a `nomad` plugin. It provides the capability to query [Nomad services API](https://developer.hashicorp.com/nomad/api-docs/services) via DNS.\n\n### 2. Which issues (if any) are related?\n\nhttps://github.com/coredns/coredns/issues/5829\n\n### 3. Which documentation changes (if any) need to be made?\n\nThe plugin has a README.md so I believe that is sufficient.\n\n### 4. 
Does this introduce a backward incompatible change or deprecation?\n\nNo.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@mr-karan, is it ok with you if someone else picks up your proposed code to complete the feature?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "coredns", + "graduated", + "networking", + "stale", + "needs-update" + ], + "category": "networking", + "cncfProjects": [ + "coredns" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/coredns/coredns/pull/5833", + "sourceRepo": "coredns/coredns", + "reactions": 8, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:45:07.601Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/coredns/coredns-6776-fix-plugin-kubernetes-fix-autopath-for-host-network-pods.json b/solutions/cncf-generated/coredns/coredns-6776-fix-plugin-kubernetes-fix-autopath-for-host-network-pods.json new file mode 100644 index 00000000..5b756a1e --- /dev/null +++ b/solutions/cncf-generated/coredns/coredns-6776-fix-plugin-kubernetes-fix-autopath-for-host-network-pods.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:06.341Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "coredns: fix: plugin/kubernetes: Fix autopath for host network pods", + "description": "### 1. 
Why is this pull request needed and what does it do?\n\nThis PR fixes the issue that the autopath plugin isn't working for pods running in the host network.\nThe reason this is marked as a fix and not a feature is because the current behaviour is causing issues in combination with caching and has inconsistencies.\n\nCurrently the AutoPath function fetches the pods by request IP to check where the request is coming from. For pods running in the host network this would be the Node IP. So if you have multiple pods running in host networking it would return multiple pods but only the first one is being used. This means that autopath might work for a pod running on one node because it happens to be the first one in the list, but it doesn't work on another node. For normal pods in the same namespace the same DNS query would also work with autopath.\n\nIn itself this is not a big issue, but in combination with caching this can cause lookup failures because A records and AAAA records are cache", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This is a follow up PR for: https://github.com/coredns/coredns/pull/6776\n@chrisohaver, let me know if you're happy with the changes.\n\n1. Why is this pull request needed and what does it do?\nThis PR fixes the issue that the autopath plugin isn't working for pods running in the host network.\nThe reason this is marked as a fix and not a feature is because the current behaviour is causing issues in combination with caching and has inconsistencies.\n\nCurrently the AutoPath function fetches the pods by request IP to check where the request is coming from. For pods running in the host network this would be the Node IP. So if you have multiple pods running in host networking it would return multiple pods but only the first one is being used. This means that autopath might work for a pod running on one node because it happens to be the first one in the list, but it doesn't work on another node. 
For normal pods in the same namespace the same DNS query would also work with autopath.\n\nIn itself this is not a big issue, but in combination with caching this can cause lookup failures because A records and AAAA records are cached differently and in our resolvers if one of them returns an answer and the other one doesn't then the whole query fails with NXDOMAIN and it doesn't try the rest of the search path.\n\nThis implementation takes all host network pods on the node, matches the query against them and takes the namespace from the pod that matches. Of course any pod could request a DNS name i", + "steps": [ + "Why is this pull request needed and what does it do?" + ] + } + }, + "metadata": { + "tags": [ + "coredns", + "graduated", + "networking", + "stale", + "needs-update" + ], + "category": "networking", + "cncfProjects": [ + "coredns" + ], + "targetResourceKinds": [ + "Pod", + "Namespace", + "Node" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/coredns/coredns/pull/6776", + "sourceRepo": "coredns/coredns", + "reactions": 16, + "comments": 1 + }, + "security": { + "scannedAt": "2026-02-27T17:45:06.341Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cortex/cortex-2839-fix-memory-leak-caused-by-object-pool-and-zero-copy.json b/solutions/cncf-generated/cortex/cortex-2839-fix-memory-leak-caused-by-object-pool-and-zero-copy.json new file mode 100644 index 00000000..a8e08b4d --- /dev/null +++ b/solutions/cncf-generated/cortex/cortex-2839-fix-memory-leak-caused-by-object-pool-and-zero-copy.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:29.432Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cortex: Fix memory leak caused by object pool and zero copy", + "description": "**What this PR does**:\nThe purpose of this PR is to fix the memory leak of Cortex Ingester. 
After this PR is merged, it is expected to reduce the runtime memory of Ingester by at least 30%.\n\n**Which issue(s) this PR fixes**:\nFixes #2665 \n\nAlthough the PR code looks very simple, the principle inside is more complicated. I try to make it as easy to understand as possible. If there are errors in some parts, please correct me.\nWhen troubleshooting the memory problem of Ingester, I simplified Ingester.Push to make it do almost nothing. Like below:\n```\n// Push implements client.IngesterServer\nfunc (i *Ingester) Push(ctx context.Context, req *client.WriteRequest) (*client.WriteResponse, error) {\n defer client.ReuseSlice(req.Timeseries)\n return &client.WriteResponse{}, nil\n}\n```\nAfter the above modifications, Ingester still takes up very high memory (60G+), until OOM, I think it must be a problem with `client.ReuseSlice`.\n\nHowever, when I commented out the reuse of TimeSeries, the proble", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "May I ask how large the incoming messages are? E.g. 
how many samples do you allow per `Push()` ?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "// Push implements client.IngesterServer\r\nfunc (i *Ingester) Push(ctx context.Context, req *client.WriteRequest) (*client.WriteResponse, error) {\r\n defer client.ReuseSlice(req.Timeseries)\r\n return &client.WriteResponse{}, nil\r\n}", + "func ReuseTimeseries(ts *TimeSeries) {\r\n\t// ts.Labels = ts.Labels[:0]\r\n\t// ts.Samples = ts.Samples[:0]\r\n\t// timeSeriesPool.Put(ts)\r\n}", + "ts.Labels = ts.Labels[:0]\r\nts.Samples = ts.Samples[:0]" + ] + } + }, + "metadata": { + "tags": [ + "cortex", + "incubating", + "observability", + "size-xs" + ], + "category": "observability", + "cncfProjects": [ + "cortex" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cortexproject/cortex/pull/2839", + "sourceRepo": "cortexproject/cortex", + "reactions": 3, + "comments": 17 + }, + "security": { + "scannedAt": "2026-02-27T17:43:29.432Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cortex/cortex-6733-feat-added-name-validation-scheme-as-a-config-field-and-flag-as-well.json b/solutions/cncf-generated/cortex/cortex-6733-feat-added-name-validation-scheme-as-a-config-field-and-flag-as-well.json new file mode 100644 index 00000000..68f037c4 --- /dev/null +++ b/solutions/cncf-generated/cortex/cortex-6733-feat-added-name-validation-scheme-as-a-config-field-and-flag-as-well.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:31.113Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cortex: feat: added name validation scheme as a config field and flag as well", + "description": "**What this PR does**:\n- Adds `name_validation_scheme` as yaml flag and `name.validation.scheme` as a flag\n- 
Remove config init and set the model.NameValidationScheme directly in New() method to creating a Cortex object\n\n**Which issue(s) this PR fixes**:\nFixes #6702 \n\n**Checklist**\n- [x] Tests updated\n- [x] Documentation added\n- [x] `CHANGELOG.md` updated - the order of entries should be `[CHANGE]`, `[FEATURE]`, `[ENHANCEMENT]`, `[BUGFIX]`", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "**What this PR does**:\nAdds name_validation_scheme as a YAML config field and -name.validation_scheme as a CLI flag to configure the metric and label name validation mode. Supports legacy (default) and UTF-8 options. \n**Which issue(s) this PR fixes**:\nFixes #6702 \nFollow Up #6733 \n**Checklist**\n- [x] Tests updated\n- [x] Documentation added\n- [x] `CHANGELOG.md` updated - the order of entries should be `[CHANGE]`, `[FEATURE]`, `[ENHANCEMENT]`, `[BUGFIX]`", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "protoc -I /go/src:./vendor/github.com/thanos-io/thanos/pkg:./vendor/github.com/gogo/protobuf:./vendor:./pkg/ruler --gogoslick_out=plugins=grpc,Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types,:./pkg/ruler ./pkg/ruler/ruler.proto\r\nmisspell -error docs\r\n# Configured via .golangci.yml.\r\ngolangci-lint run\r\nlevel=warning msg=\"[config_reader] The configuration option `output.format` is deprecated, please use `output.formats`\"\r\nlevel=warning msg=\"[config_reader] The configuration option `linters.errcheck.exclude` is deprecated, please use `linters.errcheck.exclude-functions`.\"\r\npkg/cortex/cortex.go:72:1: File is not properly formatted (gofmt)\r\n\terrInvalidHTTPPrefix = errors.New(\"HTTP prefix should be empty or start with /\")\r\n^\r\nmake: *** [Makefile:176: lint] Error 1" + ] + } + }, + "metadata": { + "tags": [ + "cortex", + "incubating", + "observability", + "component-documentation", + "size-m" + ], + "category": "observability", + 
"cncfProjects": [ + "cortex" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/cortexproject/cortex/pull/6733", + "sourceRepo": "cortexproject/cortex", + "reactions": 2, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:43:31.113Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cri-o/cri-o-2417-update-contrib-systemd-unit-files-to-match-project-name.json b/solutions/cncf-generated/cri-o/cri-o-2417-update-contrib-systemd-unit-files-to-match-project-name.json new file mode 100644 index 00000000..cc1872de --- /dev/null +++ b/solutions/cncf-generated/cri-o/cri-o-2417-update-contrib-systemd-unit-files-to-match-project-name.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:20.401Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cri-o: Update contrib systemd unit files to match project name", + "description": "Closes #2415", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Fixes #2415 \n\nSigned-off-by: Mrunal Patel ", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "cri-o", + "graduated", + "runtime", + "size-xs", + "lgtm", + "approved", + "dco-signoff--yes" + ], + "category": "runtime", + "cncfProjects": [ + "cri-o" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cri-o/cri-o/pull/2417", + "sourceRepo": "cri-o/cri-o", + "reactions": 1, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:45:20.401Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cri-o/cri-o-2447-add-support-for-dual-stack-ipv6.json 
b/solutions/cncf-generated/cri-o/cri-o-2447-add-support-for-dual-stack-ipv6.json new file mode 100644 index 00000000..fd0b4f4b --- /dev/null +++ b/solutions/cncf-generated/cri-o/cri-o-2447-add-support-for-dual-stack-ipv6.json @@ -0,0 +1,57 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:22.344Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cri-o: Add support for dual stack IPv6", + "description": "We have to vendor the latest Kubernetes master as well as OCICNI to get\nthe latest support for dual-stack IPv6. This adds an `AdditionalIps`\nfield to the network status, which will be internally handled as a\nsimple slice of strings. This means that the pod annotations now can\ncontain multiple IPs as well. The same applied to the inspect HTTP API.\n\nRelates to: https://github.com/kubernetes/kubernetes/pull/73977\nCurrent status: Under testing since the kubernetes PR has been merged. \n\nCloses #2554", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "**What type of PR is this?**\n/kind api-change\n\n/kind feature\n\n**Implements**: https://github.com/kubernetes/enhancements/pull/808\nIncluded:\n- [x] Types modification.\n- [x] Conversions.\n- [x] new conversion unit tests.\n- [x] (updated existing unit tests to match the type changes).\n- [x] node ipam controller that supports multi cidrs.\n- [x] unit tests: node ipam mutli cidr.\n- [x] azure cloud provider support for mutli routes per node.\n- [x] unit tests: azure cloud provider support for mutli routes per node.\n- [x] kubenet support for dual-stack.\n- [x] revise host networking for dual-stack (v1.16).\n- [x] unit tests: kubenet support for dual-stack.\n- [x] e2e tests (as a stand alone PR referenced below).\n\n```release-note\nTo configure controller manager to use ipv6dual stack:\nuse --cluster-cidr=\",\".\n\nNotes:\n \n1. 
Only the first two cidrs are used (soft limits for Alpha, might be lifted later on). \n2. Only the \"RangeAllocator\" (default) is allowed as a value for --cidr-allocator-type . Cloud allocators are not compatible with ipv6dualstack \n```\n\nCC @lachie83 @thockin\n\n### What is in the box? (alpha status)\n- Node ipam controller now supports multi-cidr per node (node.Spec.PodCIDRs).\n- Pod.PodStatus.PodIPs supports multiple IPs.\n- Route Controller now supports creating routes `per node's cidr`.\n- kubenet now supports dualstack. \n- Azure support `Route() interface` implementation for dualstack.\n- Known issue: kubenet will always force reporting ipv4, ipv6 (irrespective of ", + "steps": [ + "Only the first two cidrs are used (soft limits for Alpha, might be lifted later on).", + "Only the \"RangeAllocator\" (default) is allowed as a value for --cidr-allocator-type . Cloud allocators are not compatible with ipv6dualstack" + ], + "codeSnippets": [ + "CC @lachie83 @thockin\r\n\r\n\r\n### What is in the box? (alpha status)\r\n- Node ipam controller now supports multi-cidr per node (node.Spec.PodCIDRs).\r\n- Pod.PodStatus.PodIPs supports multiple IPs.\r\n- Route Controller now supports creating routes `per node's cidr`.\r\n- kubenet now supports dualstack. \r\n- Azure support `Route() interface` implementation for dualstack.\r\n- Known issue: kubenet will always force reporting ipv4, ipv6 (irrespective of podCIDRs order).\r\n- updates to CRI to support reporting multiple IPs per `PodSandBox`.\r\n\r\n### Known Issues\r\n- Cluster ipv6 CIDRS mask bigger than `24` will fail\r\n- ipv6 cidr assignment is using the default ipv4 cidr `/24` (Future: add controls over v6 cidr size)\r\n- kubenet forces `v4,v6` reporting of IPs, users who must`v6,v4` as `--cluster-cidr` \r\n- Masquerading is not done by kubenet. Users will have to use `ip-masq-agent` to perform masquerading correctly for ipv6. 
A standing PR has been created to support this feature https://github.com/kubernetes-incubator/ip-masq-agent/pull/45 \n[APPROVALNOTIFIER] This PR is **APPROVED**\n\nThis pull-request has been approved by: *
saschagrunert*\n\nThe full list of commands accepted by this bot can be found [here](https://go.k8s.io/bot-commands?repo=cri-o%2Fcri-o).\n\nThe pull request process is described [here](https://git.k8s.io/community/contributors/guide/owners.md#the-code-review-process)\n\n
\nNeeds approval from an approver in each of these files:\n\n- ~~[OWNERS](https://github.com/cri-o/cri-o/blob/master/OWNERS)~~ [saschagrunert]\n\nApprovers can indicate their approval by writing `/approve` in a comment\nApprovers can cancel approval by writing `/approve cancel` in a comment\n
\n\n[APPROVALNOTIFIER] This PR is **APPROVED**\n\nThis pull-request has been approved by: *saschagrunert*\n\nThe full list of commands accepted by this bot can be found [here](https://go.k8s.io/bot-commands?repo=cri-o%2Fcri-o).\n\nThe pull request process is described [here](https://git.k8s.io/community/contributors/guide/owners.md#the-code-review-process)\n\n
\nNeeds approval from an approver in each of these files:\n\n- ~~[OWNERS](https://github.com/cri-o/cri-o/blob/master/OWNERS)~~ [saschagrunert]\n\nApprovers can indicate their approval by writing `/approve` in a comment\nApprovers can cancel approval by writing `/approve cancel` in a comment\n
\n\n# [Codecov](https://codecov.io/gh/cri-o/cri-o/pull/2447?src=pr&el=h1) Report\n> Merging [#2447](https://codecov.io/gh/cri-o/cri-o/pull/2447?src=pr&el=desc) into [master](https://codecov.io/gh/cri-o/cri-o/commit/dab780e903a4f6b802f1519a2610b685103df80e?src=pr&el=desc) will **decrease** coverage by `0.38%`.\n> The diff coverage is `15.55%`.", + "No, the (new) `podIPs` array in the pod object still contains only the ipv4 address;", + "I applied the PR with;" + ] + } + }, + "metadata": { + "tags": [ + "cri-o", + "graduated", + "runtime", + "size-xxl", + "sig-network", + "lgtm", + "approved", + "sig-storage", + "sig-apps", + "sig-cloud-provider", + "dco-signoff--yes" + ], + "category": "runtime", + "cncfProjects": [ + "cri-o" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/cri-o/cri-o/pull/2447", + "sourceRepo": "cri-o/cri-o", + "reactions": 1, + "comments": 41 + }, + "security": { + "scannedAt": "2026-02-27T17:45:22.344Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cri-o/cri-o-269-refactor-move-core-logical-from-server-to-manager.json b/solutions/cncf-generated/cri-o/cri-o-269-refactor-move-core-logical-from-server-to-manager.json new file mode 100644 index 00000000..4d15be33 --- /dev/null +++ b/solutions/cncf-generated/cri-o/cri-o-269-refactor-move-core-logical-from-server-to-manager.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:26.876Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cri-o: [Refactor] Move core logical from server to manager", + "description": "Move core logical from server to manager to allow share underlying code(here I make it as manager) between `server` and `kpod`. 
\n\nNote: add a redundant commit just copy code from server/* to manager/*, make it easy to figure out what have changed in those files.\n\n~~Also fix #268~~ Edit: revert that commit.\n\nPTAL @mrunalp @runcom @feiskyer", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@runcom @sameo PTAL. See if this helps with races during tests.\n\nSigned-off-by: Mrunal Patel ", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "cri-o", + "graduated", + "runtime" + ], + "category": "runtime", + "cncfProjects": [ + "cri-o" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cri-o/cri-o/pull/269", + "sourceRepo": "cri-o/cri-o", + "reactions": 1, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:45:26.876Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cri-o/cri-o-2912-extend-image-pull-metrics.json b/solutions/cncf-generated/cri-o/cri-o-2912-extend-image-pull-metrics.json new file mode 100644 index 00000000..35468477 --- /dev/null +++ b/solutions/cncf-generated/cri-o/cri-o-2912-extend-image-pull-metrics.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:23.915Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cri-o: Extend image pull metrics", + "description": "We now add a new set of image pull metrics, which collect the transferred\nbytes per image name or digest every second during an image pull. We\nalso add a metric which collects the skipped bytes if an image has\nalready been downloaded.\n\nThis fixes the ignored command line parameters for the metrics, too.\n\nExample output from the metrics server:\n1. Download the image \n1. Remove the image, download it again\n1. 
Download the already existing image \n```bash\n# HELP container_runtime_crio_image_pulls_by_digest Bytes transferred by CRI-O image pulls by digest\n# TYPE container_runtime_crio_image_pulls_by_digest counter\ncontainer_runtime_crio_image_pulls_by_digest{digest=\"sha256:2f1a357cf2898f6700b74d933e1784a348df8f719c4db77203423e63b538de9b\",mediatype=\"application/vnd.docker.container.image.v1+json\",name=\"docker.io/nixos/nix\",size=\"5127\"} 10254\ncontainer_runtime_crio_image_pulls_by_digest{digest=\"sha256:480223332b134112f7f7f1292789ae19b695c1329a7180200ae82778fab59e95\",mediatype=\"\",name=\"docker.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "The progress reader will be now created via the new `newProgressReader`\nfunction. This way we have the possibility to trigger a new event called\n`ProgressEventNewArtifact` and `ProgressEventDone` to indicate the\nboundaries of the artifact download to the API consumer. During the\ndownload, we additionally provide the `OffsetUpdate`, which can be used\nto report the downloaded data during the last elapsed time interval.\n\nUnit tests have been added as well to the progress reader, whereas the\ndocumentation for the new types has been enhanced as well.\n\nNeeded for metrics improvements for CRI-O: https://github.com/cri-o/cri-o/pull/2912", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "# HELP container_runtime_crio_image_pulls_by_digest Bytes transferred by CRI-O image pulls by digest\r\n# TYPE container_runtime_crio_image_pulls_by_digest counter\r\ncontainer_runtime_crio_image_pulls_by_digest{digest=\"sha256:2f1a357cf2898f6700b74d933e1784a348df8f719c4db77203423e63b538de9b\",mediatype=\"application/vnd.docker.container.image.v1+json\",name=\"docker.io/nixos/nix\",size=\"5127\"} 
10254\r\ncontainer_runtime_crio_image_pulls_by_digest{digest=\"sha256:480223332b134112f7f7f1292789ae19b695c1329a7180200ae82778fab59e95\",mediatype=\"\",name=\"docker.io/nixos/nix\",size=\"277005\"} 554010\r\ncontainer_runtime_crio_image_pulls_by_digest{digest=\"sha256:74bc3bbbef74304d58600283df51bab6a3dbe9b1ec939bd9f5db7b3c87cfb612\",mediatype=\"\",name=\"docker.io/nixos/nix\",size=\"60336471\"} 1.20672942e+08\r\ncontainer_runtime_crio_image_pulls_by_digest{digest=\"sha256:9d48c3bd43c520dc2784e868a780e976b207cbf493eaff8c6596eb871cbd9609\",mediatype=\"\",name=\"docker.io/nixos/nix\",size=\"2789669\"} 5.579338e+06\r\n# HELP container_runtime_crio_image_pulls_by_name Bytes transferred by CRI-O image pulls by name\r\n# TYPE container_runtime_crio_image_pulls_by_name counter\r\ncontainer_runtime_crio_image_pulls_by_name{name=\"docker.io/nixos/nix\",size=\"63408272\"} 1.26816544e+08\r\n# HELP container_runtime_crio_image_pulls_by_name_skipped Bytes skipped by CRI-O image pulls by name\r\n# TYPE container_runtime_crio_image_pulls_by_name_skipped counter\r\ncontainer_runtime_crio_image_pulls_by_name_skipped{name=\"docker.io/nixos/nix\"} 1.91975283e+08" + ] + } + }, + "metadata": { + "tags": [ + "cri-o", + "graduated", + "runtime", + "size-l", + "lgtm", + "approved", + "dco-signoff--yes", + "release-1-17" + ], + "category": "runtime", + "cncfProjects": [ + "cri-o" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/cri-o/cri-o/pull/2912", + "sourceRepo": "cri-o/cri-o", + "reactions": 1, + "comments": 43 + }, + "security": { + "scannedAt": "2026-02-27T17:45:23.915Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cri-o/cri-o-3060-support-pulling-image-specified-by-tag-and-digest.json b/solutions/cncf-generated/cri-o/cri-o-3060-support-pulling-image-specified-by-tag-and-digest.json new file mode 100644 index 00000000..e8f3c96b --- /dev/null +++ 
b/solutions/cncf-generated/cri-o/cri-o-3060-support-pulling-image-specified-by-tag-and-digest.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:28.416Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cri-o: Support pulling image specified by tag and digest.", + "description": "Favour the digest over the tag if both specified.\nThis is required for docker compatibility.\nfixes #2351.\n\n_Docker supports canonical image references (`repo/image:tag@sha`) that contain both a tag and an image ID. The latter is actually used for resolution. Despite its ambiguity this feature is used by tools like skaffold to ensure that the correct image version is used within k8s manifests and enforce rollouts even when the tag was overwritten (e.g. `skaffold dev`) while having the tag there as a hint/documentation.\nA downside of this feature - as for image ID based resolution in general - is that, when the image ID is deleted from the registry somehow, an image rebuild will not help as long as you don't regenerate the manifests and do a rollout as well since the new image will usually have a different ID/digest due to timestamps._\n\n**- What I did**\nWhen a remote image reference is specified ambiguously with both tag and digest its tag is removed to resolve based on the image ID only", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Refers to #783.\n\nChanges:\n- Moved the code that returns an error when both tag and digest are\n present from `newReference` to `reference.newImageDestination` in\n order to allow pulls but no pushes.\n- If present use digest instead of tag as `DockerReferenceIdentity`\n or rather for policy matching.\n- Adjusted tests.\n\nSigned-off-by: Max Goltzsche \n\nQuestion remains how well this works with the other tools in the chain - needs to be tested first which is why this is still WIP.", + "steps": [ + "Review the issue 
discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "cri-o", + "graduated", + "runtime", + "size-m", + "lgtm", + "approved", + "ok-to-test", + "dco-signoff--yes" + ], + "category": "runtime", + "cncfProjects": [ + "cri-o" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cri-o/cri-o/pull/3060", + "sourceRepo": "cri-o/cri-o", + "reactions": 1, + "comments": 23 + }, + "security": { + "scannedAt": "2026-02-27T17:45:28.416Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cri-o/cri-o-4082-runtimevm-store-logs-again-and-store-them-in-the-correct-format.json b/solutions/cncf-generated/cri-o/cri-o-4082-runtimevm-store-logs-again-and-store-them-in-the-correct-format.json new file mode 100644 index 00000000..ab0de7fb --- /dev/null +++ b/solutions/cncf-generated/cri-o/cri-o-4082-runtimevm-store-logs-again-and-store-them-in-the-correct-format.json @@ -0,0 +1,56 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:30.950Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cri-o: runtimeVM: Store logs again, and store them in the correct format", + "description": "#### What type of PR is this?\n\n/kind bug\n\n#### What this PR does / why we need it:\n\nThe first patch of this series reverts commit 924a8e9830f112821f03984d417f938f945ba442, as the commit in question introduced a regression on CRI-O, when using the VM runtime type, which caused the logs to never be stored in:\n`/var/log/pods/$namespace_$pod_name_$pod_uid/$container_name/$restart_number.log`\n\nThe second commit fixes the log format as the current format of logs stored by CRI-O, when using the VM runtime type, looks like:\n```\n[root@k8s cri-o]# cat /var/log/pods/default_fio-test_855d9cec-2b0f-42d2-bbe5-cce769cb7b10/fio/0.log\n{\n \"fio 
version\" : \"fio-2.17-45-g06cb\",\n \"timestamp\" : 1597428680,\n \"timestamp_ms\" : 1597428680853,\n \"time\" : \"Fri Aug 14 18:11:20 2020\",\n \"jobs\" : [\n {\n \"jobname\" : \"random-writers-emptydir\",\n \"groupid\" : 0,\n \"error\" : 0,\n \"eta\" : 0,\n \"elapsed\" : 61,\n \"job options\" : {\n \"name\" : \"random-writers-emptydir\",\n \"filename\" : ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This is an automated cherry-pick of #4082\n\n/assign fidencio\n\n```release-note\nNone\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "[root@k8s cri-o]# cat /var/log/pods/default_fio-test_855d9cec-2b0f-42d2-bbe5-cce769cb7b10/fio/0.log\r\n{\r\n \"fio version\" : \"fio-2.17-45-g06cb\",\r\n \"timestamp\" : 1597428680,\r\n \"timestamp_ms\" : 1597428680853,\r\n \"time\" : \"Fri Aug 14 18:11:20 2020\",\r\n \"jobs\" : [\r\n {\r\n \"jobname\" : \"random-writers-emptydir\",\r\n \"groupid\" : 0,\r\n \"error\" : 0,\r\n \"eta\" : 0,\r\n \"elapsed\" : 61,\r\n \"job options\" : {\r\n \"name\" : \"random-writers-emptydir\",\r\n \"filename\" : \"/test-volume/testfile\",\r\n \"ioengine\" : \"libaio\",\r\n \"iodepth\" : \"16\",\r\n \"rw\" : \"randwrite\",\r\n \"bs\" : \"512\",\r\n \"direct\" : \"0\",\r\n \"size\" : \"1G\",\r\n \"numjobs\" : \"1\",\r\n \"runtime\" : \"60s\",\r\n \"fallocate\" : \"none\",\r\n \"invalidate\" : \"1\"\r\n },\r\n...", + "[root@k8s cri-o]# cat /var/log/pods/default_fio-test_6278a00a-1abe-41e8-b048-5c22be826876/fio/0.log\r\n2020-08-14T20:20:02.17267229+02:00 stdout F {\r\n2020-08-14T20:20:02.172720217+02:00 stdout F \"fio version\" : \"fio-2.17-45-g06cb\",\r\n2020-08-14T20:20:02.172726757+02:00 stdout F \"timestamp\" : 1597429202,\r\n2020-08-14T20:20:02.172749864+02:00 stdout F \"timestamp_ms\" : 1597429202414,\r\n2020-08-14T20:20:02.172752913+02:00 stdout F \"time\" : \"Fri Aug 14 18:20:02 
2020\",\r\n2020-08-14T20:20:02.172755049+02:00 stdout F \"jobs\" : [\r\n2020-08-14T20:20:02.172757593+02:00 stdout F {\r\n2020-08-14T20:20:02.172759998+02:00 stdout F \"jobname\" : \"random-writers-emptydir\",\r\n2020-08-14T20:20:02.172762745+02:00 stdout F \"groupid\" : 0,\r\n2020-08-14T20:20:02.172764845+02:00 stdout F \"error\" : 0,\r\n2020-08-14T20:20:02.172766929+02:00 stdout F \"eta\" : 0,\r\n2020-08-14T20:20:02.172769756+02:00 stdout F \"elapsed\" : 61,\r\n2020-08-14T20:20:02.1727719+02:00 stdout F \"job options\" : {\r\n2020-08-14T20:20:02.172774095+02:00 stdout F \"name\" : \"random-writers-emptydir\",\r\n2020-08-14T20:20:02.172776168+02:00 stdout F \"filename\" : \"/test-volume/testfile\",\r\n2020-08-14T20:20:02.172778173+02:00 stdout F \"ioengine\" : \"libaio\",\r\n2020-08-14T20:20:02.172780201+02:00 stdout F \"iodepth\" : \"16\",\r\n2020-08-14T20:20:02.172782184+02:00 stdout F \"rw\" : \"randwrite\",\r\n2020-08-14T20:20:02.172784156+02:00 stdout F \"bs\" : \"512\",\r\n2020-08-14T20:20:02.17278615+02:00 stdout F \"direct\" : \"0\",\r\n2020-08-14T20:20:02.172788418+02:00 stdout F \"size\" : \"1G\",\r\n2020-08-14T20:20:02.172791139+02:00 stdout F \"numjobs\" : \"1\",\r\n2020-08-14T20:20:02.1727932+02:00 stdout F \"runtime\" : \"60s\",\r\n2020-08-14T20:20:02.172795277+02:00 stdout F \"fallocate\" : \"none\",\r\n2020-08-14T20:20:02.172797446+02:00 stdout F \"invalidate\" : \"1\"\r\n2020-08-14T20:20:02.1727995+02:00 stdout F },\r\n...", + "[root@k8s cri-o]# kubectl logs fio-test\r\n{\r\n \"fio version\" : \"fio-2.17-45-g06cb\",\r\n \"timestamp\" : 1597429202,\r\n \"timestamp_ms\" : 1597429202414,\r\n \"time\" : \"Fri Aug 14 18:20:02 2020\",\r\n \"jobs\" : [\r\n {\r\n \"jobname\" : \"random-writers-emptydir\",\r\n \"groupid\" : 0,\r\n \"error\" : 0,\r\n \"eta\" : 0,\r\n \"elapsed\" : 61,\r\n \"job options\" : {\r\n \"name\" : \"random-writers-emptydir\",\r\n \"filename\" : \"/test-volume/testfile\",\r\n \"ioengine\" : \"libaio\",\r\n \"iodepth\" : \"16\",\r\n 
\"rw\" : \"randwrite\",\r\n \"bs\" : \"512\",\r\n \"direct\" : \"0\",\r\n \"size\" : \"1G\",\r\n \"numjobs\" : \"1\",\r\n \"runtime\" : \"60s\",\r\n \"fallocate\" : \"none\",\r\n \"invalidate\" : \"1\"\r\n },\r\n..." + ] + } + }, + "metadata": { + "tags": [ + "cri-o", + "graduated", + "runtime", + "kind-bug", + "lgtm", + "approved", + "release-note-none", + "dco-signoff--yes" + ], + "category": "runtime", + "cncfProjects": [ + "cri-o" + ], + "targetResourceKinds": [ + "Pod", + "Job", + "Namespace" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cri-o/cri-o/pull/4082", + "sourceRepo": "cri-o/cri-o", + "reactions": 1, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:45:30.950Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cri-o/cri-o-4137-fix-pinns-compilation-for-temp-failure-retry.json b/solutions/cncf-generated/cri-o/cri-o-4137-fix-pinns-compilation-for-temp-failure-retry.json new file mode 100644 index 00000000..086ea07b --- /dev/null +++ b/solutions/cncf-generated/cri-o/cri-o-4137-fix-pinns-compilation-for-temp-failure-retry.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:29.386Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cri-o: Fix pinns compilation for TEMP_FAILURE_RETRY", + "description": "#### What type of PR is this?\n\n/kind bug\n\n#### What this PR does / why we need it:\nIn case the macro is not available we now define it on our own in the utils.\n#### Which issue(s) this PR fixes:\n\nFixes https://github.com/cri-o/cri-o/issues/4136\n#### Special notes for your reviewer:\n\n#### Does this PR introduce a user-facing change?\n\n```release-note\n- Fixed pinns compilation if the `TEMP_FAILURE_RETRY` macro is not available\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/test integration_rhel", 
+ "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "# [Codecov](https://codecov.io/gh/cri-o/cri-o/pull/4137?src=pr&el=h1) Report\n> Merging [#4137](https://codecov.io/gh/cri-o/cri-o/pull/4137?src=pr&el=desc) into [master](https://codecov.io/gh/cri-o/cri-o/commit/be9e42b7010803346df24c374d98c50de1ae7d29?el=desc) will **not change** coverage.\n> The diff coverage is `n/a`." + ] + } + }, + "metadata": { + "tags": [ + "cri-o", + "graduated", + "runtime", + "kind-bug", + "lgtm", + "approved", + "release-note", + "dco-signoff--yes" + ], + "category": "runtime", + "cncfProjects": [ + "cri-o" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cri-o/cri-o/pull/4137", + "sourceRepo": "cri-o/cri-o", + "reactions": 1, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:45:29.386Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cri-o/cri-o-4354-runtime-vm-ensure-closeiochan-is-not-nil-inside-closestdin-s-function.json b/solutions/cncf-generated/cri-o/cri-o-4354-runtime-vm-ensure-closeiochan-is-not-nil-inside-closestdin-s-function.json new file mode 100644 index 00000000..0303a543 --- /dev/null +++ b/solutions/cncf-generated/cri-o/cri-o-4354-runtime-vm-ensure-closeiochan-is-not-nil-inside-closestdin-s-function.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:25.481Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cri-o: runtime_vm: Ensure closeIOChan is not nil inside CloseStdin's function …", + "description": "#### What type of PR is this?\n\n/kind bug\n\n#### What this PR does / why we need it:\n\nThis patch fixes the situation of `kubectl cp` hanging forever.\n\nDebugging this issue I've faced a quite interesting situatuon where after `closeIOChan` is 
closed, the `CloseStdin` functions \"resumes\" while `closeIOChan` is **nil**, making the function to sit there and wait forever.\n\nHonestly, this bugs my mind quite a bit as I was under the impression that all the everyone waiting for the channel to be closed would be notified, would consume its content, and only after that it'd be nullified. Well, Today I learnt.\n\nThe simplest and more future proof way to work this issue around is to ensure `closeIOChan` is not **nil** inside CloseStdin's function.\n\nThanks to @liubin, @haircommander, and @r4f4 for being available to discuss the issue.\n\n#### Which issue(s) this PR fixes:\n\nFixes #4353\n\n#### Special notes for your reviewer:\n\n#### Does this PR introduce a user-facing change?\n\n```release-note\nNone\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Forward port PR kata-containers/runtime#3013 for issue kata-containers/runtime#3001.\n\nFixes: #1076\n\nSigned-off-by: Bo Chen ", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Forward port PR kata-containers/runtime#3013 for issue kata-containers/runtime#3001.\r\n\r\nFixes: #1076\r\n\r\nSigned-off-by: Bo Chen \n/area-vm\n@haircommander, updated in order to keep the `defer()` function. Thanks for the suggestion.\n# [Codecov](https://codecov.io/gh/cri-o/cri-o/pull/4354?src=pr&el=h1) Report\n> Merging [#4354](https://codecov.io/gh/cri-o/cri-o/pull/4354?src=pr&el=desc) (6e897b8) into [master](https://codecov.io/gh/cri-o/cri-o/commit/943f033cbb67995e4e002bba64cd38d80dcf8314?el=desc) (943f033) will **not change** coverage.\n> The diff coverage is `0.00%`." 
+ ] + } + }, + "metadata": { + "tags": [ + "cri-o", + "graduated", + "runtime", + "kind-bug", + "lgtm", + "approved", + "release-note-none", + "dco-signoff--yes" + ], + "category": "runtime", + "cncfProjects": [ + "cri-o" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cri-o/cri-o/pull/4354", + "sourceRepo": "cri-o/cri-o", + "reactions": 1, + "comments": 20 + }, + "security": { + "scannedAt": "2026-02-27T17:45:25.481Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cri-o/cri-o-4631-config-don-t-fail-if-the-non-default-runtime-doesn-t-pass-validation.json b/solutions/cncf-generated/cri-o/cri-o-4631-config-don-t-fail-if-the-non-default-runtime-doesn-t-pass-validation.json new file mode 100644 index 00000000..236685f4 --- /dev/null +++ b/solutions/cncf-generated/cri-o/cri-o-4631-config-don-t-fail-if-the-non-default-runtime-doesn-t-pass-validation.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:31.930Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cri-o: config: Don't fail if the non default runtime doesn't pass validation", + "description": "#### What type of PR is this?\n\n/kind bug\n\n#### What this PR does / why we need it:\n\nCRI-O is too strict by simply not starting up in case a **non** default runtime is misconfigured.\n\n#### Which issue(s) this PR fixes:\n\nFixes: #4621\n\n#### Special notes for your reviewer:\n\nI need to get to writing tests to it, but I wasn't successful on a quick try. 
:-/\n\n#### Does this PR introduce a user-facing change?\n\n```release-note\nInstead of failing to start, CRI-O now only prints a warning and ignores the runtime, in case a **non** default runtime is misconfigured.\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "changes LGTM\nassuming happy tests and an added test would be nice If possible.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "changes LGTM\r\nassuming happy tests and an added test would be nice If possible.\n/test kata-containers\r\n\n/test kata-containers\nOkay, I just added some basic unit test for this, and CI seems to be happy about it.\n# [Codecov](https://codecov.io/gh/cri-o/cri-o/pull/4631?src=pr&el=h1) Report\n> Merging [#4631](https://codecov.io/gh/cri-o/cri-o/pull/4631?src=pr&el=desc) (98742ba) into [master](https://codecov.io/gh/cri-o/cri-o/commit/4fff6999c779dd865e9a6d4100de7b65a7b3a5c7?el=desc) (4fff699) will **increase** coverage by `0.03%`.\n> The diff coverage is `100.00%`.\n\n> :exclamation: Current head 98742ba differs from pull request most recent head f04cdc5. 
Consider uploading reports for the commit f04cdc5 to get more accurate results" + ] + } + }, + "metadata": { + "tags": [ + "cri-o", + "graduated", + "runtime", + "kind-bug", + "lgtm", + "approved", + "release-note", + "dco-signoff--yes" + ], + "category": "runtime", + "cncfProjects": [ + "cri-o" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cri-o/cri-o/pull/4631", + "sourceRepo": "cri-o/cri-o", + "reactions": 1, + "comments": 20 + }, + "security": { + "scannedAt": "2026-02-27T17:45:31.930Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cri-o/cri-o-579-implement-non-terminal-attach.json b/solutions/cncf-generated/cri-o/cri-o-579-implement-non-terminal-attach.json new file mode 100644 index 00000000..5bfe23a0 --- /dev/null +++ b/solutions/cncf-generated/cri-o/cri-o-579-implement-non-terminal-attach.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:15.476Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cri-o: Implement non-terminal attach", + "description": "We use a SOCK_SEQPACKET socket for the attach unix domain socket, which\nmeans the kernel will ensure that the reading side only ever get the\ndata from one write operation. We use this for frameing, where the\nfirst byte is the pipe that the next bytes are for. We have to make sure\nthat all reads from the socket are using at least the same size of buffer\nas the write side, because otherwise the extra data in the message\nwill be dropped.\n\nThis also adds a stdin pipe for the container, similar to the ones we\nuse for stdout/err, because we need a way for an attached client\nto write to stdin, even if not using a tty.\n\nThis fixes #569\n\nNote: This depends on #571 (and thus contains those commits), only the last commit is actually new. 
Once #571 is merged i'll update the PR with that.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "lint error in the travis CI tests.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "STEP: executing a command with run --rm and attach with stdin\r\nJun 9 18:48:45.944: INFO: Running '/home/amurdaca/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/var/run/kubernetes/admin.kubeconfig --namespace=e2e-tests-kubectl-fd7qc run e2e-test-rm-busybox-job --image=gcr.io/google_containers/busybox:1.24 --rm=true --generator=job/v1 --restart=OnFailure --attach=true --stdin -- sh -c cat && echo 'stdin closed''", + "kubectl run e2e-test --image=gcr.io/google_containers/busybox:1.24 --rm=true -- generator=job/v1 --restart=OnFailure --attach=true --stdin -- sh -c cat && echo 'stdin closed'", + " alexlarsson, I am getting a Failed to create container in the logs when I run the command\r\n ./cluster/kubectl.sh run e2e-test --image=gcr.io/google_containers/busybox:1.24 -- generator=job/v1 --restart=Never --attach=true --stdin -- sh -c cat && echo 'stdin closed'" + ] + } + }, + "metadata": { + "tags": [ + "cri-o", + "graduated", + "runtime", + "kube-1-8-x", + "cherry-picked" + ], + "category": "runtime", + "cncfProjects": [ + "cri-o" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/cri-o/cri-o/pull/579", + "sourceRepo": "cri-o/cri-o", + "reactions": 5, + "comments": 37 + }, + "security": { + "scannedAt": "2026-02-27T17:45:15.476Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cri-o/cri-o-5811-remove-the-external-dependency-on-the-conntrack-binary.json b/solutions/cncf-generated/cri-o/cri-o-5811-remove-the-external-dependency-on-the-conntrack-binary.json new file mode 100644 index 00000000..b529a66c --- 
/dev/null +++ b/solutions/cncf-generated/cri-o/cri-o-5811-remove-the-external-dependency-on-the-conntrack-binary.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:32.945Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cri-o: remove the external dependency on the conntrack binary", + "description": "We can use netlink call directly\n\nSigned-off-by: Antonio Ojea \n\nFixes: #5807\n\n/kind bug\n\n```release-note\ncrio no longer requires the conntrack binary\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@aojea can you rebase to get the latest CI fixes?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "cri-o", + "graduated", + "runtime", + "kind-bug", + "lgtm", + "approved", + "release-note", + "dco-signoff--yes" + ], + "category": "runtime", + "cncfProjects": [ + "cri-o" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cri-o/cri-o/pull/5811", + "sourceRepo": "cri-o/cri-o", + "reactions": 1, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:45:32.945Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cri-o/cri-o-7719-add-support-for-oci-artifact-seccomp-profiles.json b/solutions/cncf-generated/cri-o/cri-o-7719-add-support-for-oci-artifact-seccomp-profiles.json new file mode 100644 index 00000000..345b75e2 --- /dev/null +++ b/solutions/cncf-generated/cri-o/cri-o-7719-add-support-for-oci-artifact-seccomp-profiles.json @@ -0,0 +1,54 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:17.712Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cri-o: Add support for OCI artifact seccomp profiles", + "description": 
"#### What type of PR is this?\n\n/kind feature\n\n#### What this PR does / why we need it:\n\nThis feature allows pulling seccomp profiles using OCI artifacts directly within CRI-O.\nThe new annotation `seccomp-profile.kubernetes.cri-o.io` can be used to:\n\n- Use a seccomp profile for a whole container image. This works if if it got build with the annotation, for example:\n ```shell\n podman build \\\n --annotation seccomp-profile.kubernetes.cri-o.io/POD=quay.io/crio/seccomp:v1 \\\n -t quay.io/crio/nginx-seccomp .\n ```\n- Use a seccomp profile for every container within a pod:\n ```yaml\n apiVersion: v1\n kind: Pod\n metadata:\n name: pod\n annotations:\n seccomp-profile.kubernetes.cri-o.io/POD: quay.io/crio/seccomp:v1\n spec: …\n ```\n- Use a seccomp profile for a specific container within a pod:\n ```yaml\n apiVersion: v1\n kind: Pod\n metadata:\n name: pod\n annotations:\n seccomp-profile.kubernetes.cri-o.io/container: quay.io/crio/seccomp:v1\n spec:\n containers:\n - n", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/retest", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "podman build \\\r\n --annotation seccomp-profile.kubernetes.cri-o.io/POD=quay.io/crio/seccomp:v1 \\\r\n -t quay.io/crio/nginx-seccomp .", + "apiVersion: v1\r\n kind: Pod\r\n metadata:\r\n name: pod\r\n annotations:\r\n seccomp-profile.kubernetes.cri-o.io/POD: quay.io/crio/seccomp:v1\r\n spec: …", + "apiVersion: v1\r\n kind: Pod\r\n metadata:\r\n name: pod\r\n annotations:\r\n seccomp-profile.kubernetes.cri-o.io/container: quay.io/crio/seccomp:v1\r\n spec:\r\n containers:\r\n - name: container\r\n image: …" + ] + } + }, + "metadata": { + "tags": [ + "cri-o", + "graduated", + "runtime", + "kind-feature", + "lgtm", + "approved", + "release-note", + "dco-signoff--yes" + ], + "category": "runtime", + "cncfProjects": [ + "cri-o" + ], + "targetResourceKinds": [ + "Pod" + ], + 
"difficulty": "beginner", + "sourceIssue": "https://github.com/cri-o/cri-o/pull/7719", + "sourceRepo": "cri-o/cri-o", + "reactions": 2, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:45:17.712Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cri-o/cri-o-7918-patch-release-workflow-and-script.json b/solutions/cncf-generated/cri-o/cri-o-7918-patch-release-workflow-and-script.json new file mode 100644 index 00000000..f0c326a1 --- /dev/null +++ b/solutions/cncf-generated/cri-o/cri-o-7918-patch-release-workflow-and-script.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:16.614Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cri-o: Patch release workflow and script", + "description": "#### What type of PR is this?\n/kind feature\n\n#### What this PR does / why we need it:\n- Initial work on the automated releases for cri-o, focusing on patch releases for now\n\n#### Which issue(s) this PR fixes:\nFixes #4003\n\n#### Special notes for your reviewer:\n- add the github action workflow\n- reuse the existing release script\n#### Does this PR introduce a user-facing change?\n\n```release-note\nNone\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/ok-to-test", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Hi @olad5. Thanks for your PR.\n\nI'm waiting for a [cri-o](https://github.com/orgs/cri-o/people) member to verify that this patch is reasonable to test. If it is, they should reply with `/ok-to-test` on its own line. Until that is done, I will not automatically test new commits in this PR, but the usual testing commands by org members will still work. 
Regular contributors should [join the org](https://github.com/orgs/cri-o/people) to skip this step.\n\nOnce the patch is verified, the new status will be reflected by the `ok-to-test` label.\n\nI understand the commands that are listed [here](https://go.k8s.io/bot-commands?repo=cri-o%2Fcri-o).\n\n
\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n
\n\n/ok-to-test\n/ok-to-test\n## [Codecov](https://app.codecov.io/gh/cri-o/cri-o/pull/7918?dropdown=coverage&src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=cri-o) Report\n> Merging [#7918](https://app.codecov.io/gh/cri-o/cri-o/pull/7918?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=cri-o) (a63bc4d) into [main](https://app.codecov.io/gh/cri-o/cri-o/commit/73ffdcea4e4935ae42efef5ae09498a9f8a77669?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=cri-o) (73ffdce) will **decrease** coverage by `0.27%`.\n> Report is 6 commits behind head on main.\n> The diff coverage is `5.10%`.\n\n
Additional details and impacted files", + "
\n[APPROVALNOTIFIER] This PR is **APPROVED**\n\nThis pull-request has been approved by: *olad5*, *saschagrunert*\n\nThe full list of commands accepted by this bot can be found [here](https://go.k8s.io/bot-commands?repo=cri-o%2Fcri-o).\n\nThe pull request process is described [here](https://git.k8s.io/community/contributors/guide/owners.md#the-code-review-process)\n\n
\nNeeds approval from an approver in each of these files:\n\n- ~~[OWNERS](https://github.com/cri-o/cri-o/blob/main/OWNERS)~~ [saschagrunert]\n\nApprovers can indicate their approval by writing `/approve` in a comment\nApprovers can cancel approval by writing `/approve cancel` in a comment\n
\n\n/assign olad5\n/retest \n/override ci/prow/ci-cgroupv2-e2e-crun\n@saschagrunert: Overrode contexts on behalf of saschagrunert: ci/prow/ci-cgroupv2-e2e-crun\n\n
\n\nIn response to [this](https://github.com/cri-o/cri-o/pull/7918#issuecomment-2033934614):\n\n>/override ci/prow/ci-cgroupv2-e2e-crun\n\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n
\n/override ci/prow/e2e-aws-ovn\n@saschagrunert: Overrode contexts on behalf of saschagrunert: ci/prow/e2e-aws-ovn\n\n
\n\nIn response to [this](https://github.com/cri-o/cri-o/pull/7918#issuecomment-2034326350):\n\n>/override ci/prow/e2e-aws-ovn\n\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n
\n/cherry-pick release-1.29\n@saschagrunert: #7918 failed to apply on top of branch \"release-1.29\":" + ] + } + }, + "metadata": { + "tags": [ + "cri-o", + "graduated", + "runtime", + "kind-feature", + "lgtm", + "approved", + "release-note-none", + "ok-to-test", + "dco-signoff--yes" + ], + "category": "runtime", + "cncfProjects": [ + "cri-o" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/cri-o/cri-o/pull/7918", + "sourceRepo": "cri-o/cri-o", + "reactions": 2, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:45:16.614Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cri-o/cri-o-9537-migrate-annotations-to-kubernetes-recommended-naming-conventions.json b/solutions/cncf-generated/cri-o/cri-o-9537-migrate-annotations-to-kubernetes-recommended-naming-conventions.json new file mode 100644 index 00000000..62c8fae9 --- /dev/null +++ b/solutions/cncf-generated/cri-o/cri-o-9537-migrate-annotations-to-kubernetes-recommended-naming-conventions.json @@ -0,0 +1,53 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:18.950Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "cri-o: Migrate annotations to Kubernetes-recommended naming conventions", + "description": "#### What type of PR is this?\n\n/kind feature\n\n#### What this PR does / why we need it:\n\nMigrates CRI-O annotations from legacy `io.kubernetes.cri-o.*` to Kubernetes-recommended `*.crio.io` format with full backward compatibility.\n\n**Key Changes:**\n\n- **V2 Package (`pkg/annotations/v2`)**: Complete successor containing all CRI-O annotations\n - 15 annotations migrated from V1 with automatic fallback\n - 8 existing annotations consolidated for consistency\n - `GetAnnotationValue()` provides transparent V1/V2 support\n \n- **Internal Separation**: Moved internal state annotations to 
`internal/annotations`\n\n- **Standardized Format**: Container-specific annotations use `/` separator (e.g., `seccomp-profile.crio.io/POD`)\n\n**Example Migration:**\n\n```yaml\n# Old (deprecated, still works)\nio.kubernetes.cri-o.userns-mode: \"auto\"\n\n# New (recommended)\nuserns-mode.crio.io: \"auto\"\n```\n\nSee `ANNOTATION_MIGRATION.md` for complete mapping table and migration guide.\n\n#### Which issue(s) this PR fixes:\n\nFi", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@cri-o/cri-o-maintainers PTAL", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "# Old (deprecated, still works)\nio.kubernetes.cri-o.userns-mode: \"auto\"\n\n# New (recommended)\nuserns-mode.crio.io: \"auto\"", + "\n## Summary by CodeRabbit\n\n* **Documentation**\n * Added a comprehensive Annotation Migration guide and README links describing the new annotation format, migration timeline, examples, precedence rules, and developer guidance.\n\n* **Improvements**\n * Adopted a versioned annotation model (V2 preferred with V1 fallbacks) across the runtime and tooling; annotation retrieval now prioritizes V2.\n\n* **Tests**\n * Added unit and extensive integration tests covering V2/V1 precedence, container-specific keys, and many runtime scenarios.\n\n* **Chores**\n * CI variables and skip lists updated to manage annotation migration tests.\n\n✏️ Tip: You can customize this high-level summary in your review settings.\n\n\n[APPROVALNOTIFIER] This PR is **APPROVED**\n\nThis pull-request has been approved by: *saschagrunert*\n\nThe full list of commands accepted by this bot can be found [here](https://go.k8s.io/bot-commands?repo=cri-o%2Fcri-o).\n\nThe pull request process is described [here](https://git.k8s.io/community/contributors/guide/owners.md#the-code-review-process)\n\n
\nNeeds approval from an approver in each of these files:\n\n- ~~[OWNERS](https://github.com/cri-o/cri-o/blob/main/OWNERS)~~ [saschagrunert]\n\nApprovers can indicate their approval by writing `/approve` in a comment\nApprovers can cancel approval by writing `/approve cancel` in a comment\n
\n\nCreating a new package, something like annotations/v2, may be better, I guess.\r\nWe don't have to put V2 suffix on each annotations, and there will be no inconsistency between \"*V2\" metrics and already canonicalized annotations.\r\n\r\nAlso this may be out of scope, some variables has \"Annotation\" suffix, which is not necessary. We can remove that now.\n@cri-o/cri-o-maintainers PTAL\n## [Codecov](https://app.codecov.io/gh/cri-o/cri-o/pull/9537?dropdown=coverage&src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=cri-o) Report\n:x: Patch coverage is `76.35135%` with `35 lines` in your changes missing coverage. Please review.\n:white_check_mark: Project coverage is 66.95%. Comparing base ([`dde2e90`](https://app.codecov.io/gh/cri-o/cri-o/commit/dde2e90ab39529015d2a263c1ec2c15e09d2c929?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=cri-o)) to head ([`c2ebfba`](https://app.codecov.io/gh/cri-o/cri-o/commit/c2ebfbac1e57716355cfe0eb4b886acb04cd3c01?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=cri-o)).\n:warning: Report is 8 commits behind head on main.\n\n
Additional details and impacted files" + ] + } + }, + "metadata": { + "tags": [ + "cri-o", + "graduated", + "runtime", + "kind-feature", + "lgtm", + "approved", + "release-note", + "dco-signoff--yes" + ], + "category": "runtime", + "cncfProjects": [ + "cri-o" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/cri-o/cri-o/pull/9537", + "sourceRepo": "cri-o/cri-o", + "reactions": 2, + "comments": 21 + }, + "security": { + "scannedAt": "2026-02-27T17:45:18.950Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/crossplane/crossplane-2385-generic-references-design-doc.json b/solutions/cncf-generated/crossplane/crossplane-2385-generic-references-design-doc.json new file mode 100644 index 00000000..f9d386f7 --- /dev/null +++ b/solutions/cncf-generated/crossplane/crossplane-2385-generic-references-design-doc.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:39.010Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "crossplane: Generic references design doc", + "description": "### Description of your changes\n\nThe design doc for generic references which also addresses common data source problem.\n\nFixes https://github.com/crossplane/crossplane/issues/1770\nFixes https://github.com/crossplane/crossplane/issues/2099\n\nI have:\n\n- [x] Read and followed Crossplane's [contribution process].\n- [x] Run `make reviewable` to ensure this PR is ready for review.\n- [x] Added `backport release-x.y` labels to auto-backport this PR if necessary.\n\n### How has this code been tested\n\nN/A.\n\n[contribution process]: https://git.io/fj2m9", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "New features:\n- Allow user to define resource management policy to instruct the provider how to manage Kubernetes resources in a 
fine-grained manner.\n- Allow user to define resource references for an Object as dependencies to retrieve values from dependent resources at runtime and guarantee the resource rendering in a specified order.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "crossplane", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "crossplane" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/crossplane/crossplane/pull/2385", + "sourceRepo": "crossplane/crossplane", + "reactions": 11, + "comments": 3 + }, + "security": { + "scannedAt": "2026-02-27T17:45:39.010Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/crossplane/crossplane-3007-feat-compositions-support-patching-from-environment.json b/solutions/cncf-generated/crossplane/crossplane-3007-feat-compositions-support-patching-from-environment.json new file mode 100644 index 00000000..ab57cb12 --- /dev/null +++ b/solutions/cncf-generated/crossplane/crossplane-3007-feat-compositions-support-patching-from-environment.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:37.440Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "crossplane: feat(compositions): Support patching from Environment", + "description": "### Description of your changes\n\nDesign: https://github.com/crossplane/crossplane/pull/3008\n\nFixes: #2099 \n\nThis adds a new cluster scope CRD `EnvironmentConfig` that is similar to a standard `ConfigMap` but allows storing arbitrary values instead of just strings.\n\n`EnvironmentConfig`s can be used in compositions using the new `FromEnvironmentFieldPath` patch type.\n\nThe main use case of this PR is to use to build a generic Crossplane package that can run in 
multiple environment without having to hardcode environment specific values in the compositions.\n\nThis is a replacement for #2938. Based on an idea briefly mentioned by @negz in the community meeting.\n\nIt avoids the discussed security and RBAC issues by not requiring access to secrets and config maps and relying on Crossplane native resources instead.\n\nI have:\n\n- [x] Read and followed Crossplane's [contribution process].\n- [x] Run `make reviewable` to ensure this PR is ready for review.\n- [ ] Added `backport release-x.y` labels to a", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "### Description of your changes\n\nThis design proposal aims to provide a solution for adding a third data source for composition patches next to claim/composite and composed resource by introducing a new resource `EnvironmentConfig`.\n\nIt is _not_ the goal of this design document to provide a way for patching from any Kubernetes object (aka generic referencers).\n\nCC @negz \n\nRelated to https://github.com/crossplane/crossplane/issues/2099\n\nImplementation: https://github.com/crossplane/crossplane/pull/3007\n\nI have:\n\n- [x] Read and followed Crossplane's [contribution process].\n- [ ] Run `make reviewable` to ensure this PR is ready for review.\n- [ ] Added `backport release-x.y` labels to auto-backport this PR if necessary.\n\n### How has this code been tested\n\nn.a.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "resources:\r\n - base:\r\n apiVersion: rancher2.rancher.jet.crossplane.io/v1alpha1\r\n kind: Project\r\n spec:\r\n forProvider:\r\n clusterId: ?\r\n patches:\r\n - toFieldPath: spec.forProvider.clusterId\r\n from: ", + "apiVersion: argoproj.io/v1alpha1\r\nkind: ApplicationSet\r\nspec:\r\n generators:\r\n - git:\r\n repoURL: https://github.com/argoproj/applicationset.git\r\n revision: HEAD\r\n directories:\r\n - path: 
examples/git-generator-directory/cluster-addons/*\r\n template:\r\n spec:\r\n destination:\r\n namespace: '{{path.basename}}'", + "apiVersion: argoproj.io/v1alpha1\r\nkind: Application\r\nspec:\r\n destination:\r\n namespace: 'argo-workflows'" + ] + } + }, + "metadata": { + "tags": [ + "crossplane", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "crossplane" + ], + "targetResourceKinds": [ + "Configmap", + "Secret" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/crossplane/crossplane/pull/3007", + "sourceRepo": "crossplane/crossplane", + "reactions": 14, + "comments": 16 + }, + "security": { + "scannedAt": "2026-02-27T17:45:37.440Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/crossplane/crossplane-3939-proposal-break-up-large-providers-by-service.json b/solutions/cncf-generated/crossplane/crossplane-3939-proposal-break-up-large-providers-by-service.json new file mode 100644 index 00000000..4ca52965 --- /dev/null +++ b/solutions/cncf-generated/crossplane/crossplane-3939-proposal-break-up-large-providers-by-service.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:42.412Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "crossplane: Proposal: Break Up Large Providers by Service", + "description": "### Description of your changes\n\nFixes #3754\n\nThis design document proposes that the 6-7 largest Crossplane providers be broken down into smaller, service-scoped ones. This would help folks install fewer CRDs, thus improving the ratio of installed-to-used Crossplane CRDs. 
Installing fewer CRDs is necessary to workaround performance issues in the Kubernetes API server and Kubernetes clients.\n\nI have:\n\n- [x] Read and followed Crossplane's [contribution process].\n- [ ] Run `make reviewable` to ensure this PR is ready for review.\n- [ ] Added `backport release-x.y` labels to auto-backport this PR if necessary.\n\n### How has this code been tested\n\n[contribution process]: https://git.io/fj2m9\n\nI proof-read it. 😄", + "type": "analyze", + "status": "completed", + "resolution": { + "summary": "### Description of your changes\n\nAs a prove of concept for filtering crds when installing providers we updated Provider spec to include two new array fields `includeCrds` and `excludeCrds` that can contain regex values:\n\n```yaml\napiVersion: pkg.crossplane.io/v1\nkind: Provider\nmetadata:\n name: crossplane-aws\nspec:\n controllerConfigRef:\n name: crossplane-aws\n ignoreCrossplaneConstraints: false\n package: #see pr in crossplane-contrib/aws https://github.com/crossplane-contrib/provider-aws/pull/1727\n packagePullPolicy: IfNotPresent\n revisionActivationPolicy: Automatic\n revisionHistoryLimit: 1\n skipDependencyResolution: false\n excludeCrds:\n - \\.aws\\.crossplane\\.io\n includeCrds:\n - securitygroup\\.ec2\\.aws\\.crossplane\\.io\n - securitygrouprules\\.ec2\\.aws\\.crossplane\\.io\n - \\.eks\\.aws\\.crossplane\\.io\n\n```\nThis poc avoids the proposal of breaking larges providers by service/group/type and shows the possibility to filter applied crds.\n\nWhen applying a provider with these optional fields set, the list of applied crd gets filtered. If a crd matches a value in excludeCrds, it is ignored, unless it matches a value in includeCrds. Crds that are not matched by values in excludeCrds get applied. 
If excludeCrds is empty, all crds will be applied.\n\nThe deployment for the provider is created with an additional env variable that is used by the provider to filter the activated controllers.\n\nThis is described and can be seen in the PR in crosplane-contrib/provider-aws: https:", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "apiVersion: pkg.crossplane.io/v1\r\nkind: Provider\r\nmetadata:\r\n name: crossplane-aws\r\nspec:\r\n controllerConfigRef:\r\n name: crossplane-aws\r\n ignoreCrossplaneConstraints: false\r\n package: #see pr in crossplane-contrib/aws https://github.com/crossplane-contrib/provider-aws/pull/1727\r\n packagePullPolicy: IfNotPresent\r\n revisionActivationPolicy: Automatic\r\n revisionHistoryLimit: 1\r\n skipDependencyResolution: false\r\n excludeCrds:\r\n - \\.aws\\.crossplane\\.io\r\n includeCrds:\r\n - securitygroup\\.ec2\\.aws\\.crossplane\\.io\r\n - securitygrouprules\\.ec2\\.aws\\.crossplane\\.io\r\n - \\.eks\\.aws\\.crossplane\\.io" + ] + } + }, + "metadata": { + "tags": [ + "crossplane", + "graduated", + "app-definition", + "performance", + "proposal", + "crd-count" + ], + "category": "workloads", + "cncfProjects": [ + "crossplane" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/crossplane/crossplane/pull/3939", + "sourceRepo": "crossplane/crossplane", + "reactions": 10, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:45:42.412Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/crossplane/crossplane-4444-add-usage-type-for-deletion-ordering-and-resource-protection.json b/solutions/cncf-generated/crossplane/crossplane-4444-add-usage-type-for-deletion-ordering-and-resource-protection.json new file mode 100644 index 00000000..821383e0 --- /dev/null +++ 
b/solutions/cncf-generated/crossplane/crossplane-4444-add-usage-type-for-deletion-ordering-and-resource-protection.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:44.500Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "crossplane: Add `Usage` type for Deletion Ordering and Resource Protection", + "description": "### Description of your changes\n\nThis PR implements the `Usage` type proposed in [this one-pager](https://github.com/crossplane/crossplane/blob/master/design/one-pager-generic-usage-type.md) as an alpha feature.\n\nThis would enable ordered deletions when there is a usage dependency between composed resources. A typical example is having a helm `Release` together with a GKE `Cluster` resource where the release is installed into the cluster. Currently, we end up orphaned `Release` resources when the `Cluster` is gone before the release is properly uninstalled. With this PR, it would be possible to include the following `Usage` resource in the same composition to prevent the `Cluster` deletion before the `Release` is gone. 
\n\n```yaml\n resources:\n - name: cluster\n base:\n apiVersion: container.gcp.upbound.io/v1beta1\n kind: Cluster\n ...\n - name: release\n base:\n apiVersion: helm.crossplane.io/v1beta1\n kind: Release\n ...\n - name: releas", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "### Description of your changes\n\nThis PR adds additional methods to the unstructured composed package.\nThese methods will be consumed by the Usage implementation as suggested [here](https://github.com/crossplane/crossplane/pull/4444#discussion_r1290648915).\n\nI have:\n\n- [x] Read and followed Crossplane's [contribution process].\n- [x] Run `make reviewable test` to ensure this PR is ready for review.\n\n### How has this code been tested\n\nConsume this PR on top of https://github.com/crossplane/crossplane/pull/4444 and run:\n\n```\nmake e2e E2E_TEST_FLAGS=\"-test.v --test-suite usage\"\n```\n\n[contribution process]: https://git.io/fj2m9", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "resources:\r\n - name: cluster\r\n base:\r\n apiVersion: container.gcp.upbound.io/v1beta1\r\n kind: Cluster\r\n ...\r\n - name: release\r\n base:\r\n apiVersion: helm.crossplane.io/v1beta1\r\n kind: Release\r\n ...\r\n - name: release-uses-cluster\r\n base:\r\n apiVersion: apiextensions.crossplane.io/v1alpha1\r\n kind: Usage\r\n spec:\r\n of:\r\n apiVersion: container.gcp.upbound.io/v1beta1\r\n kind: Cluster\r\n resourceSelector:\r\n matchControllerRef: true\r\n by:\r\n apiVersion: helm.crossplane.io/v1beta1\r\n kind: Release\r\n resourceSelector:\r\n matchControllerRef: true", + "helm repo add crossplane-master https://charts.crossplane.io/master --force-update\r\nhelm upgrade --install crossplane --namespace crossplane-system crossplane-master/crossplane --version v1.14.0-rc.0.190.g78bdab10 --create-namespace --set image.repository=turkenh/crossplane --set 
image.tag=v1.14.0-rc.0.198.g9049ab9c --set \"args={--debug,--enable-usages}\"", + "❯ cd test/e2e/manifests/apiextensions/usage/standalone/\r\n❯ kubectl apply -f setup/\r\nprovider.pkg.crossplane.io/provider-nop created\r\n❯ kubectl wait provider.pkg provider-nop --for condition=healthy --timeout 2m\r\nprovider.pkg.crossplane.io/provider-nop condition met\r\n❯ kubectl apply -f with-by/\r\nusage.apiextensions.crossplane.io/using-uses-used created\r\nnopresource.nop.crossplane.io/used-resource created\r\nnopresource.nop.crossplane.io/using-resource created\r\n❯ kubectl apply -f with-reason/\r\nusage.apiextensions.crossplane.io/protect-a-resource created\r\nnopresource.nop.crossplane.io/protected-resource created\r\n❯ kubectl get usages\r\nNAME DETAILS READY AGE\r\nprotect-a-resource This resource is protected! True 15s\r\nusing-uses-used NopResource/using-resource uses NopResource/used-resource True 19s\r\n❯ kubectl delete -f with-by/used.yaml\r\nError from server (This resource is in-use by 1 Usage(s), including the Usage \"using-uses-used\" by resource NopResource/using-resource.): error when deleting \"with-by/used.yaml\": admission webhook \"nousages.apiextensions.crossplane.io\" denied the request: This resource is in-use by 1 Usage(s), including the Usage \"using-uses-used\" by resource NopResource/using-resource.\r\n❯ kubectl delete -f with-reason/used.yaml\r\nError from server (This resource is in-use by 1 Usage(s), including the Usage \"protect-a-resource\" with reason: \"This resource is protected!\".): error when deleting \"with-reason/used.yaml\": admission webhook \"nousages.apiextensions.crossplane.io\" denied the request: This resource is in-use by 1 Usage(s), including the Usage \"protect-a-resource\" with reason: \"This resource is protected!\"." 
+ ] + } + }, + "metadata": { + "tags": [ + "crossplane", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "crossplane" + ], + "targetResourceKinds": [ + "Namespace" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/crossplane/crossplane/pull/4444", + "sourceRepo": "crossplane/crossplane", + "reactions": 6, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:45:44.500Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/crossplane/crossplane-4744-beta-support-for-package-runtime-config.json b/solutions/cncf-generated/crossplane/crossplane-4744-beta-support-for-package-runtime-config.json new file mode 100644 index 00000000..e7a203aa --- /dev/null +++ b/solutions/cncf-generated/crossplane/crossplane-4744-beta-support-for-package-runtime-config.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:35.685Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "crossplane: Beta support for Package Runtime Config", + "description": "### Description of your changes\n\nThis PR implements Package Runtime Config (Fixes #4699) as proposed in the [design doc](https://github.com/crossplane/crossplane/blob/2c5e7f07ba9e3d83d1c85169bbde685de8514ab8/design/one-pager-package-runtime-config.md) with the following exception:\n\nInstead of initializing and maintaining our defaults like `replicas: 1` or pod and runtime container `securityContext` in the `DeploymentRuntimeConfig` named `default`, it only initializes an empty `DeploymentRuntimeConfig` named as `default` _only if it does not exist_. 
It was discussed in [the issue](https://github.com/crossplane/crossplane/issues/4699#issuecomment-1748369504) in details but this is primarily to avoid the following:\n\n- Creating a new `DeploymentRuntimeConfig` would require duplicating the values from the `default` one, assuming the user won't want to lose them. If you only intend to include additional labels, you would need to duplicate the entire default configuration and append labels to", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "### Description of your changes\n\nThis PR: \n\n- Refactors existing hooks and manifest generation with a ManifestBuilder.\n- Introduces `PackageWithRuntime` and `PackageRevisionWithRuntime` interfaces.\n- Uses specific package types (Fixes #4690).\n- Cleans up the unused `controllerRef` from PackageRevisionStatus.\n\nP.S: I wanted to have a dedicated PR for the work here instead of combining it with Package Runtime Config [Implementation](https://github.com/crossplane/crossplane/pull/4744). 
I'll rebase that PR on top of this.\n\nI have:\n\n- [x] Read and followed Crossplane's [contribution process].\n- [x] Added or updated unit **and** E2E tests for my change.\n- [x] Run `make reviewable` to ensure this PR is ready for review.\n- [ ] ~Added `backport release-x.y` labels to auto-backport this PR, if necessary.~\n- [ ] ~Opened a PR updating the [docs], if necessary.~\n\n[contribution process]: https://git.io/fj2m9\n[docs]: https://docs.crossplane.io/contribute/contribute", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "crossplane", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "crossplane" + ], + "targetResourceKinds": [ + "Pod", + "Deployment" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/crossplane/crossplane/pull/4744", + "sourceRepo": "crossplane/crossplane", + "reactions": 17, + "comments": 0 + }, + "security": { + "scannedAt": "2026-02-27T17:45:35.685Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/crossplane/crossplane-4885-add-a-one-pager-guiding-function-build-tooling.json b/solutions/cncf-generated/crossplane/crossplane-4885-add-a-one-pager-guiding-function-build-tooling.json new file mode 100644 index 00000000..d7ca9400 --- /dev/null +++ b/solutions/cncf-generated/crossplane/crossplane-4885-add-a-one-pager-guiding-function-build-tooling.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:45.990Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "crossplane: Add a one-pager guiding Function build tooling", + "description": "### Description of your changes\n\nFixes https://github.com/crossplane/crossplane/issues/4796\n\nI think as we build templates for languages like Go, Python, and TypeScript it will be useful 
to have a framework/philosophy for our patterns and tooling choices. The ultimate goal here is to make building a Function in a language you already know and work with regularly a familiar, unintimidating experience.\n\nSee https://github.com/crossplane/function-template-go/pull/23 for a reference implementation.\n\nI have: \n\n- [x] Read and followed Crossplane's [contribution process].\n- [ ] ~Run `make reviewable` to ensure this PR is ready for review.~\n- [ ] ~Added or updated unit tests.~\n- [ ] ~Added or updated e2e tests.~\n- [ ] ~Linked a PR or a [docs tracking issue] to [document this change].~\n- [ ] ~Added `backport release-x.y` labels to auto-backport this PR.~\n\nNeed help with this checklist? See the [cheat sheet].\n\n[contribution process]: https://github.com/crossplane/crossplane/tree/master/contribut", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "### Description of your changes\n\nSee https://github.com/crossplane/crossplane/pull/4885 for the framework/philosophy behind this implementation.\n\nCloses https://github.com/crossplane/function-template-go/pull/16\n\nThis PR was heavily influenced by #16 (thanks @pedjak). 
The main differences are:\n\n* Avoid introducing a Makefile.\n* Don't test and lint inside the multi-stage build\n\nThese differences both come from a goal to keep build and CI as simple and lightweight as possible:\n\n* I don't think the learning curve of Make is worth the value it brings (`make test` doesn't add much vs `go test`)\n* I think realistically folks are going to need the Go toolchain installed to develop a Function so running `go test` inside Docker is worth the price of a more complicated / hard to follow Dockerfile.\n\nI have:\n\n- [x] Read and followed Crossplane's [contribution process].\n- [ ] ~Added or updated unit tests for my change.~\n\n[contribution process]: https://git.io/fj2m9\n[docs]: https://docs.crossplane.io/contribute/contribute", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "crossplane", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "crossplane" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/crossplane/crossplane/pull/4885", + "sourceRepo": "crossplane/crossplane", + "reactions": 5, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:45:45.990Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/crossplane/crossplane-5540-feat-configurable-ports.json b/solutions/cncf-generated/crossplane/crossplane-5540-feat-configurable-ports.json new file mode 100644 index 00000000..c0c6f3e2 --- /dev/null +++ b/solutions/cncf-generated/crossplane/crossplane-5540-feat-configurable-ports.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:40.729Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "crossplane: feat/configurable ports", + "description": "### Description of your 
changes\n\nWhen running crossplane (and providers/functions) in the host network the need to configure ports arises.\n\nThis MR has two parts:\n\n1. Make crossplane ports (service/webhook, health probe, metrics) configurable\n2. Allow `DeploymentRuntimeConfig` to override the default ports (`metrics`, `webhook`, `grpc`) in both `Deployment` and `Service`.\n\nAdditional merge requests would be necessary to\n\n- Adjust the documentation\n- Adjust the provider and function templates (to introduce configuration options for their ports)\n- Providers / Functions (to introduce configuration options for their ports)\n\nPutting this on draft to gather your feedback. Please let me know what you think about it and how to proceed with the follow ups.\n\nContributes to #5520\n\n#### Crossplane Ports\n\nThe following configuration options (flags / environment variables) where added, with their defaults set to the current values:\n\n- `--webhook-port=9443`\n- `--metrics-bind-address=:8080`\n- `--healt", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "### Description of your changes\n\nAs outlined in the issue below, when running Crossplane in certain environments (like EKS using Calico CNI), where the control plane resides outside the workload network it is necessary to run webhooks in the host network. This calls for the need to make the port configurable to avoid conflicts.\n\nI opened [crossplane/crossplane!5540](https://github.com/crossplane/crossplane/pull/5540) to allow a `DeploymentRuntimeConfig` to overwrite the ports of the crossplane managed services created for each provider / function.\n\nFixes #208\n\nI have:\n\n- [x] Read and followed Crossplane's [contribution process].\n- [x] Run `make reviewable test` to ensure this PR is ready for review.\n\n### How has this code been tested\n\nI deployed my fork of this and crossplane in our test cluster running EKS and Calico. 
With this I was able to run both Crossplane and provider-kubernetes inside the host network with custom ports.\n\nThis change only makes sense if my other PR gets accepted.\n\n[contribution process]: https://git.io/fj2m9", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "# K8s provider\r\napiVersion: pkg.crossplane.io/v1\r\nkind: Provider\r\nmetadata:\r\n name: provider-kubernetes\r\nspec:\r\n package: \"index.docker.io/crossplanecontrib/provider-kubernetes:v0.15.0\"\r\n runtimeConfigRef:\r\n name: provider-kubernetes\r\n---\r\napiVersion: pkg.crossplane.io/v1beta1\r\nkind: DeploymentRuntimeConfig\r\nmetadata:\r\n name: provider-kubernetes\r\nspec:\r\n deploymentTemplate:\r\n spec:\r\n selector: { }\r\n template:\r\n spec:\r\n hostNetwork: true\r\n nodeSelector:\r\n kubernetes.io/os: linux\r\n imagePullSecrets:\r\n - name: crossplane-workaround\r\n containers:\r\n - name: package-runtime\r\n args:\r\n - --webhook-port=9610\r\n serviceTemplate:\r\n spec:\r\n ports:\r\n - name: webhook\r\n port: 9200 # doesn't matter\r\n targetPort: 9610\r\n protocol: TCP", + "apiVersion: pkg.crossplane.io/v1beta1\r\nkind: Function\r\nmetadata:\r\n name: function-patch-and-transform\r\nspec:\r\n package: xpkg.upbound.io/crossplane-contrib/function-patch-and-transform:v0.7.0\r\n runtimeConfigRef:\r\n name: function-patch-and-transform\r\n---\r\napiVersion: pkg.crossplane.io/v1beta1\r\nkind: DeploymentRuntimeConfig\r\nmetadata:\r\n name: function-patch-and-transform\r\nspec:\r\n deploymentTemplate:\r\n spec:\r\n selector: { }\r\n template:\r\n spec:\r\n hostNetwork: true\r\n containers:\r\n - name: package-runtime\r\n args:\r\n - --address=:9612\r\n serviceTemplate:\r\n spec:\r\n ports:\r\n - name: webhook\r\n port: 9200 # doesn't matter\r\n targetPort: 9612\r\n protocol: TCP", + "hostNetwork: true\r\ndnsPolicy: \"ClusterFirstWithHostNet\"\r\nwebhooks:\r\n port: 9600\r\nmetrics:\r\n enabled: true\r\n 
port: 9601\r\nreadiness:\r\n port: 9602" + ] + } + }, + "metadata": { + "tags": [ + "crossplane", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "crossplane" + ], + "targetResourceKinds": [ + "Pod", + "Deployment", + "Service" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/crossplane/crossplane/pull/5540", + "sourceRepo": "crossplane/crossplane", + "reactions": 11, + "comments": 28 + }, + "security": { + "scannedAt": "2026-02-27T17:45:40.729Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/crossplane/crossplane-6161-support-rendering-default-values-from-xrd-in-crossplane-cli.json b/solutions/cncf-generated/crossplane/crossplane-6161-support-rendering-default-values-from-xrd-in-crossplane-cli.json new file mode 100644 index 00000000..e44a23f2 --- /dev/null +++ b/solutions/cncf-generated/crossplane/crossplane-6161-support-rendering-default-values-from-xrd-in-crossplane-cli.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:46.990Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "crossplane: Support Rendering Default Values from XRD in Crossplane CLI", + "description": "### Description of your changes\n\nWhen rendering Crossplane functions, the system currently only displays values explicitly set by users in their Composite Resources (XRs). However, default values defined in the Composite Resource Definition (XRD) schema are not being included in the rendered output. 
This PR introduces functionality to merge default values from XRD schemas into the rendered output.\n\nFixes #5361 \n\nI have: \n\n- [x] Read and followed Crossplane's [contribution process].\n- [x] Run `earthly +reviewable` to ensure this PR is ready for review.\n- [x] Added or updated unit tests.\n- [ ] ~Added or updated e2e tests.~\n- [x] Linked a PR or a [docs tracking issue] to [document this change].\n- [ ] ~Added `backport release-x.y` labels to auto-backport this PR.~\n\nNeed help with this checklist? See the [cheat sheet].\n\n[contribution process]: https://github.com/crossplane/crossplane/tree/main/contributing\n[docs tracking issue]: https://github.com/crossplane/docs/issues/new\n[document this c", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Thank you @man-ish-k for your patience and your efforts on this contribution! i'd love for us to be able to take a look soon, but in the meantime I just wanted to thank you for what you've done so far 🙇‍♂️", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "-x, --include-full-xr Include a direct copy of the input XR's spec\r\n and metadata fields in the rendered output." 
+ ] + } + }, + "metadata": { + "tags": [ + "crossplane", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "crossplane" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/crossplane/crossplane/pull/6161", + "sourceRepo": "crossplane/crossplane", + "reactions": 5, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:45:46.990Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/dapr/dapr-1059-enable-get-for-secret-stores-to-dynamically-fetch-secrets.json b/solutions/cncf-generated/dapr/dapr-1059-enable-get-for-secret-stores-to-dynamically-fetch-secrets.json new file mode 100644 index 00000000..3c726b39 --- /dev/null +++ b/solutions/cncf-generated/dapr/dapr-1059-enable-get-for-secret-stores-to-dynamically-fetch-secrets.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:58.535Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "dapr: Enable GET for secret stores to dynamically fetch secrets", + "description": "# Description\n\nEnabling GET for secret stores to dynamically fetch secrets as described in #1034. Current work just enables the http endpoint.\n\n## Issue reference\n\nWe strive to have all PR being opened based on an issue, where the problem or feature have been discussed prior to implementation.\n\nPlease reference the issue this PR will close: #1034\n\n## Checklist\n\nPlease make sure you've completed the relevant tasks for this PR, out of the following list:\n\n* [x] Code compiles correctly\n* [ ] Created/updated tests\n* [x] Unit tests passing\n* [ ] End-to-end tests passing\n* [ ] Extended the documentation\n* [ ] Specification has been updated\n* [ ] Provided sample for the feature", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Great to see this! 
Can you please resolve conflicts?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "dapr", + "graduated", + "app-definition", + "do-not-merge" + ], + "category": "workloads", + "cncfProjects": [ + "dapr" + ], + "targetResourceKinds": [ + "Secret" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/dapr/dapr/pull/1059", + "sourceRepo": "dapr/dapr", + "reactions": 2, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:45:58.535Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/dapr/dapr-1681-initial-commit-of-state-store-transactional-api.json b/solutions/cncf-generated/dapr/dapr-1681-initial-commit-of-state-store-transactional-api.json new file mode 100644 index 00000000..a1f9324f --- /dev/null +++ b/solutions/cncf-generated/dapr/dapr-1681-initial-commit-of-state-store-transactional-api.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:54.681Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "dapr: Initial commit of state store transactional API", + "description": "# Description\n\nI noticed a good first issue with a closed incomplete [PR](https://github.com/dapr/dapr/pull/931)\nThus I created a new PR and referenced the previous PR.\n\n## Things to Note\n1) https://github.com/dapr/components-contrib/issues/375 If you upsert and delete and upsert the key, a GetRequest will return nothing because Get Request executes a HGETALL first before using GET. 
HGETALL returns empty value or or none which is set by the other test via HSET.\n2) I also converted the values from bytes to string in the gRPC implementation since Redis inserts an string representation of integers and string.\n\n## Issue reference\n\nWe strive to have all PR being opened based on an issue, where the problem or feature have been discussed prior to implementation.\n\nPlease reference the issue this PR will close: #924 \n\n## Checklist\n\nPlease make sure you've completed the relevant tasks for this PR, out of the following list:\n\n* [x] Code compiles correctly\n* [x] Created/updated tests\n* [x] Unit t", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Found the available test cluster - dapr-aks-e2e-01. Please wait until test is done.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "go mod edit -replace github.com/dapr/dapr=github.com/chinzhiweiblank/dapr@96e0963a61eba12b2fe5f3715ed2b530b212b1bc" + ] + } + }, + "metadata": { + "tags": [ + "dapr", + "graduated", + "app-definition", + "do-not-merge" + ], + "category": "workloads", + "cncfProjects": [ + "dapr" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/dapr/dapr/pull/1681", + "sourceRepo": "dapr/dapr", + "reactions": 3, + "comments": 79 + }, + "security": { + "scannedAt": "2026-02-27T17:45:54.681Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/dapr/dapr-1864-add-windows-containers-to-the-docker-multiarch-manifest.json b/solutions/cncf-generated/dapr/dapr-1864-add-windows-containers-to-the-docker-multiarch-manifest.json new file mode 100644 index 00000000..56448009 --- /dev/null +++ b/solutions/cncf-generated/dapr/dapr-1864-add-windows-containers-to-the-docker-multiarch-manifest.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": 
"2026-02-27T17:45:59.656Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "dapr: Add windows containers to the docker multiarch manifest", + "description": "# Description\n\nIt turns out, you can build a docker multiplatfrom image across both windows and linux. This means we can deliver a cross platform sidecar injector simply by creating a daprd windows container and letting docker automatically pull the right one based on the os/architecture of the kubernetes node the user app happens to be deployed to.\n\nPrior to this change, we would build each set of binaries separately, then copy them all to one job to do a docker buildx to create the multiplatform images. Since buildx does not support windows, we'll now build docker containers on their respective host types (linux/amd64 linux/arm windows/amd64), tag them with the suffix version-os-arch and push them. Then, another job will come along and do a dapr manifest build version version-linux-amd64 version-windows-amd64 ...\n\n## Issue reference\n\nWe strive to have all PR being opened based on an issue, where the problem or feature have been discussed prior to implementation.\n\nPlease reference the", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Great stuff.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "dapr % kubectl set env deployment/dapr-sidecar-injector SIDECAR_IMAGE=docker.io/wcs1only/daprd:edge --namespace=test\r\n\r\ndapr % kubectl describe pod `kubectl get pods --namespace=test | awk '/add/ {print $1}'` --namespace=test | grep -A20 Events\r\nEvents:\r\n Type Reason Age From Message\r\n ---- ------ ---- ---- -------\r\n Normal Scheduled 20s default-scheduler Successfully assigned test/addapp-57c54d5f47-n675g to akswin2000000\r\n Normal Pulling 17s kubelet, akswin2000000 Pulling image 
\"wcs1only/distributed-calculator-go:edge\"\r\n Normal Pulled 16s kubelet, akswin2000000 Successfully pulled image \"wcs1only/distributed-calculator-go:edge\"\r\n Normal Created 16s kubelet, akswin2000000 Created container add\r\n Normal Started 14s kubelet, akswin2000000 Started container add\r\n Normal Pulling 14s kubelet, akswin2000000 Pulling image \"docker.io/wcs1only/daprd:edge\"\r\n Normal Pulled 13s kubelet, akswin2000000 Successfully pulled image \"docker.io/wcs1only/daprd:edge\"\r\n Normal Created 13s kubelet, akswin2000000 Created container daprd\r\n Normal Started 11s kubelet, akswin2000000 Started container daprd\r\ndapr % kubectl logs `kubectl get pods --namespace=test | awk '/addapp/ {print $1}'` add --namespace=test\r\nAdding 5.000000 to 6.000000 on Windows!" + ] + } + }, + "metadata": { + "tags": [ + "dapr", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "dapr" + ], + "targetResourceKinds": [ + "Pod", + "Deployment", + "Job", + "Namespace", + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/dapr/dapr/pull/1864", + "sourceRepo": "dapr/dapr", + "reactions": 2, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:45:59.656Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/dapr/dapr-3606-feat-add-config-api.json b/solutions/cncf-generated/dapr/dapr-3606-feat-add-config-api.json new file mode 100644 index 00000000..6fce667c --- /dev/null +++ b/solutions/cncf-generated/dapr/dapr-3606-feat-add-config-api.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:50.964Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "dapr: feat: add config api", + "description": "# Description\n\n## Issue reference\n\nPlease reference the issue this PR will close: #2988\n\n## Checklist\n\nPlease make sure you've completed 
the relevant tasks for this PR, out of the following list:\n\n* [x] Code compiles correctly\n* [x] Created/updated tests\n* [x] Unit tests passing\n* [x] End-to-end tests passing\n* [ ] Extended the documentation / Created issue in the https://github.com/dapr/docs/ repo: dapr/docs#_[issue number]_\n* [ ] Specification has been updated / Created issue in the https://github.com/dapr/docs/ repo: dapr/docs#_[issue number]_\n* [ ] Provided sample for the feature / Created issue in the https://github.com/dapr/docs/ repo: dapr/docs#_[issue number]_", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@LaurenceLiZhixin notice the checks are failing", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "dapr", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "dapr" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/dapr/dapr/pull/3606", + "sourceRepo": "dapr/dapr", + "reactions": 4, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:45:50.964Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/dapr/dapr-6419-updated-python-script-to-find-latest-release-version-and-set-env-var.json b/solutions/cncf-generated/dapr/dapr-6419-updated-python-script-to-find-latest-release-version-and-set-env-var.json new file mode 100644 index 00000000..c04f55df --- /dev/null +++ b/solutions/cncf-generated/dapr/dapr-6419-updated-python-script-to-find-latest-release-version-and-set-env-var.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:55.760Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "dapr: Updated python script to find latest release version and set env var", + "description": "# 
Description\n\nThis PR modifies one of the python scripts used in the Github Actions Workflows to fix the issue mentioned in https://github.com/dapr/dapr/issues/5625. It defines a boolean function that checks the `.md` file names in `docs/release_notes` to find semantic versions (minus `-rc` release candidates) and compares the highest version against the release version being inputted to the script.\n\nThis was required as the script currently was not actually checking whether the release was the latest version, and instead just validating that the provided version had a release note against it and that it wasn't an `-rc` release. This introduces issues with the `:latest` image tag when a patch for a previous major version (e.g., `1.9.*`) is released.\n\n## Issue reference\n\nPlease reference the issue this PR will close: #5625\n\n## Checklist\n\nI tested the code locally but there are no unit / end-to-end tests that exist, since it is just a workflow script.\n\nPlease make sure you've completed", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "# Dapr SDK JS test\n\n🔗 **[Link to Action run](https://github.com/dapr/dapr/actions/runs/5135962852)**\n\nCommit ref: ac8cc5a34a94535e6c211c0d1d4bae48e518b81c\n\n## ✅ JS SDK tests passed", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "dapr", + "graduated", + "app-definition", + "stale" + ], + "category": "workloads", + "cncfProjects": [ + "dapr" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/dapr/dapr/pull/6419", + "sourceRepo": "dapr/dapr", + "reactions": 2, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:45:55.760Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/dapr/dapr-912-adding-support-for-multiple-state-stores-in-the-same-cluster.json 
b/solutions/cncf-generated/dapr/dapr-912-adding-support-for-multiple-state-stores-in-the-same-cluster.json new file mode 100644 index 00000000..b7ce4912 --- /dev/null +++ b/solutions/cncf-generated/dapr/dapr-912-adding-support-for-multiple-state-stores-in-the-same-cluster.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:53.529Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "dapr: Adding support for multiple state stores in the same cluster", + "description": "# Description\n\nCurrently Dapr supports only one state store , this feature is to support multiple state stores in the same cluster\n\nWith this change , the state API allows the app to target a state store, for example:\n\nv1.0/state/storeA/\nv1.0/state/storeB/\n\nThis enables a scenario wherein an app can write to multiple stores based on need, one for fast in memory cache and another for persistent sql storage.\n\n## Issue reference\nCloses #636 \n\n## Checklist\n\nPlease make sure you've completed the relevant tasks for this PR, out of the following list:\n\n* [x] Code compiles correctly\n* [x] Created/updated tests\n* [x] Unit tests passing\n* [x] End-to-end tests passing\n* [ ] Extended the documentation\n* [ ] Specification has been updated\n* [ ] Provided sample for the feature", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/ok-to-test", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "dapr", + "graduated", + "app-definition", + "do-not-merge" + ], + "category": "workloads", + "cncfProjects": [ + "dapr" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/dapr/dapr/pull/912", + "sourceRepo": "dapr/dapr", + "reactions": 3, + "comments": 32 + }, + "security": { + "scannedAt": "2026-02-27T17:45:53.529Z", + "scannerVersion": 
"cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/dapr/dapr-930-allow-for-customization-of-included-components.json b/solutions/cncf-generated/dapr/dapr-930-allow-for-customization-of-included-components.json new file mode 100644 index 00000000..4d7eb3c7 --- /dev/null +++ b/solutions/cncf-generated/dapr/dapr-930-allow-for-customization-of-included-components.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:57.328Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "dapr: Allow for customization of included components", + "description": "This PR moves component registration from the individual packages to explicit arguments passed into `runtime.Run` which is invoked in `main.go`. This gives adopters of Dapr the ability to fully opt in/out of the components that are compiled into `daprd`. For importantly, it is more natural to customi", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "**What this PR does**:\n\n**Which issue(s) this PR fixes**:\n\nFixes #\n\n**Special notes for your reviewer**:\n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\nadd secret api\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "dapr", + "graduated", + "app-definition", + "do-not-merge" + ], + "category": "workloads", + "cncfProjects": [ + "dapr" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/dapr/dapr/pull/930", + "sourceRepo": "dapr/dapr", + "reactions": 2, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:45:57.328Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/dapr/dapr-962-initial-prometheus-metrics-endpoint.json 
b/solutions/cncf-generated/dapr/dapr-962-initial-prometheus-metrics-endpoint.json new file mode 100644 index 00000000..e318cbba --- /dev/null +++ b/solutions/cncf-generated/dapr/dapr-962-initial-prometheus-metrics-endpoint.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:52.538Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "dapr: Initial Prometheus Metrics Endpoint", + "description": "# Description\n\nI've added an initial metrics endpoint using OpenCensus's Prometheus exporter to hook into Dapr's HTTP pipeline. Due to the incompatibility between fasthttp, net/http, and ochttp this is a bit more messy than I'd like. I'll continue to investigate better alternatives.\n\nThis is built on the existing work in #950 \n\nExample `/metrics` endpoint:\n```\n# HELP nodeapp_opencensus_io_http_server_latency Latency distribution of HTTP requests\n# TYPE nodeapp_opencensus_io_http_server_latency histogram\nnodeapp_opencensus_io_http_server_latency_bucket{le=\"1\"} 2\nnodeapp_opencensus_io_http_server_latency_bucket{le=\"2\"} 5\nnodeapp_opencensus_io_http_server_latency_bucket{le=\"3\"} 9\nnodeapp_opencensus_io_http_server_latency_bucket{le=\"4\"} 12\nnodeapp_opencensus_io_http_server_latency_bucket{le=\"5\"} 13\nnodeapp_opencensus_io_http_server_latency_bucket{le=\"6\"} 14\nnodeapp_opencensus_io_http_server_latency_bucket{le=\"8\"} 14\nnodeapp_opencensus_io_http_server_latency_bucket{le=\"10\"} 14\nnodeapp_openc", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "# Description\n\nA middleware that can be used to rate limit the number of requests passing through the HTTP pipeline.\n\nExample component\n```yaml\n apiVersion: dapr.io/v1alpha1\n kind: Component\n metadata:\n name: ratelimit\n spec:\n type: middleware.http.ratelimit\n metadata:\n - name: maxRequestsPerSecond\n value: 10\n```\nExample configuration\n```yaml\n apiVersion: 
dapr.io/v1alpha1\n kind: Configuration\n metadata:\n name: pipeline\n spec:\n httpPipeline:\n handlers:\n - name: ratelimit\n type: middleware.http.ratelimit\n```\n\n_NOTE:_ The middleware will be invoked on any HTTP ingress request to Dapr and therefore this will also throttle the user application's calls.\n\n## Issue reference\n\nPlease reference the issue this PR will close: #192 \n\n## Checklist\n\nPlease make sure you've completed the relevant tasks for this PR, out of the following list:\n\n* [x] Code compiles correctly\n* [ ] Created/updated tests\n* [ ] Extended the documentation", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "apiVersion: dapr.io/v1alpha1\r\n kind: Component\r\n metadata:\r\n name: ratelimit\r\n spec:\r\n type: middleware.http.ratelimit\r\n metadata:\r\n - name: maxRequestsPerSecond\r\n value: 10", + "apiVersion: dapr.io/v1alpha1\r\n kind: Configuration\r\n metadata:\r\n name: pipeline\r\n spec:\r\n httpPipeline:\r\n handlers:\r\n - name: ratelimit\r\n type: middleware.http.ratelimit", + "These are just the default ones already supported but as we add others we can optionally add those to the view.\r\n\r\n\n> @youngbupark if we standardize on a Prometheus endpoint (pull) then people can use whatever compatible agents they want to scrape it. Or would you prefer to support other [OC stats exporters](https://opencensus.io/exporters/supported-exporters/go/) which would require both pull and push? This would then require a new \"MetricsExporter\" type I guess.\r\n\r\nWe may need to support both at the end. I agree that in the first iteration, Prometheus can be the default metric backends in Dapr. In the long term, we need to allow user to use the other oc exporter. \r\nSome teams or companies have their internal telemetry backend/instrumentation, but doesn't support prometheus pull model. 
Then they need to develop scraping agent to premetheus metric directly (like AKS oms agent) or a loader from prometheus to their own telemetry backend(stackdriver-prometheus sidecar). Allowing oc exporter will be more flexible for them. We need to discuss it more. \r\n\r\n> If we just do Prometheus (no other exporters) - do you think it's reasonable to define what metrics you want to expose in the dapr config similar to how the `TracingSpec` works, or would you want to define a component? To me it feels like if we're only supporting Prometheus there is no \"building block\" and we should just have some metrics toggles in the dapr config such as:\r\n> \r\n>" + ] + } + }, + "metadata": { + "tags": [ + "dapr", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "dapr" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/dapr/dapr/pull/962", + "sourceRepo": "dapr/dapr", + "reactions": 3, + "comments": 20 + }, + "security": { + "scannedAt": "2026-02-27T17:45:52.538Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/devfile/devfile-333-remove-plugin-s-from-the-devfile-schema.json b/solutions/cncf-generated/devfile/devfile-333-remove-plugin-s-from-the-devfile-schema.json new file mode 100644 index 00000000..98f6ecd0 --- /dev/null +++ b/solutions/cncf-generated/devfile/devfile-333-remove-plugin-s-from-the-devfile-schema.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:29.785Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "devfile: Remove `plugin`s from the Devfile schema", + "description": "### What does this PR do?\n\nThis PR proposes the very light changes required to remove the `plugin` components from the user-facing Devfile Json schema, while keeping them in GO sources and Kubernetes CRDs, mainly at 
the SPI level, as a way for tooling implementations to complete a devfile with additional content according to their own logic.\n\nThis PR is created as a Draft, since I didn't change the samples, as well as other possibly required changes in tests, etc ...\nIts purpose was mainly to contribute the required change in the generator source code.\n\nSo this PR should be completed with according changes in samples, tests, etc... before being mergeable.\n\n### What issues does this PR fix or reference?\n\nNot sure an issue has been created already in this GH repo. But this removal of plugins from devfile schema has been discussed in the Devfile Cabal and forseen as a change that should be done asap, as soon as a complete agreement with all stakeholders has been reached.\n\n--\nEDIT by Maysu", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Fixes [#182: Review Migration Guide](https://github.com/devfile/api/issues/182). I've gone through the migration docs and made them more strictly adhere to the IBM style guide. \n\nNote, I removed a lot of the \"procedures\" because there weren't actually instructions for user to do anything. We most certainly can include more instructions on how to migrate, but I'm going to need help with developing that content. \n\nWe also link a lot to GitHub issues. 
I'm fine with this, but with all links and GH issues especially, I'm concerned the issues we link to will quickly become outdated and therefore no longer useful.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "devfile", + "sandbox", + "app-definition", + "lgtm" + ], + "category": "workloads", + "cncfProjects": [ + "devfile" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/devfile/api/pull/333", + "sourceRepo": "devfile/api", + "reactions": 0, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:47:29.786Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/devfile/devfile-35-implement-agreed-on-devfile-2-0-issues.json b/solutions/cncf-generated/devfile/devfile-35-implement-agreed-on-devfile-2-0-issues.json new file mode 100644 index 00000000..0ee82d1e --- /dev/null +++ b/solutions/cncf-generated/devfile/devfile-35-implement-agreed-on-devfile-2-0-issues.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:30.856Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "devfile: Implement agreed-on devfile 2.0 issues", + "description": "### What does this PR do?\n\nThis PR implements the changes agreed on in the [devfile 2.0 epic](https://github.com/che-incubator/devworkspace-api/issues/15)\n\n### What issues does this PR fix or reference?\n\nhttps://github.com/che-incubator/devworkspace-api/issues/15", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Open Developer Workspace:\n[![Contribute](https://che.openshift.io/factory/resources/factory-contribute.svg)](https://che.openshift.io/f/?url=https://github.com/che-incubator/devworkspace-api/tree/Implement-agreed-on-devfile-2.0-issues)", + "steps": [ + "Review the 
issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "devfile", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "devfile" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/devfile/api/pull/35", + "sourceRepo": "devfile/api", + "reactions": 0, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:47:30.856Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/devspace/devspace-2344-feat-add-tanka-integration-to-devspace.json b/solutions/cncf-generated/devspace/devspace-2344-feat-add-tanka-integration-to-devspace.json new file mode 100644 index 00000000..2a98a0b0 --- /dev/null +++ b/solutions/cncf-generated/devspace/devspace-2344-feat-add-tanka-integration-to-devspace.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:32.976Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "devspace: feat: add tanka integration to Devspace", + "description": "/kind feature\nresolves https://github.com/loft-sh/devspace/issues/2269\n\n## Purpose\nThis PR picks up on #2325 and introduces Tanka environment support for Devspaces. 
We internally use Tanka and would like to share a common code base between all environments, including local development stacks.\n\n## Todos\n- [x] Extend the schema definition with a `tanka` deployment configuration\n- [x] Fix the schema of deployments.tanka\n- [x] Add a Tanka deployer under pkg/devspace/deploy/deployer/tanka\n- [x] Check if we add an option to run jb update before using tk\n- [ ] ~Make a PR to https://github.com/loft-sh/loft-util to be able to download tanka binary~\n- [x] Add e2e tests\n\nCloses #2325", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@xvzf we commit the dependencies to make sure everything that is needed to run the project is checked into github which includes the `vendor` folder. What dependencies did you need to update?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "devspace", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "devspace" + ], + "targetResourceKinds": [ + "Deployment" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/devspace-sh/devspace/pull/2344", + "sourceRepo": "devspace-sh/devspace", + "reactions": 3, + "comments": 27 + }, + "security": { + "scannedAt": "2026-02-27T17:47:32.976Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/dex/dex-1138-support-connector-selection-in-client-requests.json b/solutions/cncf-generated/dex/dex-1138-support-connector-selection-in-client-requests.json new file mode 100644 index 00000000..d84913a6 --- /dev/null +++ b/solutions/cncf-generated/dex/dex-1138-support-connector-selection-in-client-requests.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:36.600Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "dex: 
support connector selection in client requests", + "description": "Fixes #1084", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This is already possible since #1481 (also see #1084). Add a `connector_id` query parameter to the auth URL.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "dex", + "sandbox", + "security" + ], + "category": "security", + "cncfProjects": [ + "dex" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/dexidp/dex/pull/1138", + "sourceRepo": "dexidp/dex", + "reactions": 20, + "comments": 6 + }, + "security": { + "scannedAt": "2026-02-27T17:47:36.600Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/dex/dex-1180-implement-refreshing-with-google.json b/solutions/cncf-generated/dex/dex-1180-implement-refreshing-with-google.json new file mode 100644 index 00000000..9b5c20cf --- /dev/null +++ b/solutions/cncf-generated/dex/dex-1180-implement-refreshing-with-google.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:42.796Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "dex: Implement refreshing with Google", + "description": "Fixes: #863 \n\nI've read through the document provided in this [comment](https://github.com/coreos/dex/issues/863#issuecomment-294997858) by @ericchiang and implemented the changes he describes.\n\nI've been testing it with the memory storage and connecting to Google so far and everything seems to be working (Added lots of Prints while debugging). 
\n\nHave had two clients set up, logged the same user into A, then into B (updating the refresh token since A's is now invalid), then refreshed client A, which used the token retrieved by client B to refresh.\n\nI believe this should now allow the creation of a specific Google connector, in turn allowing someone to implement retrieval of groups using their Admin Directory API", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Fixes #866 #1001 \n\n**This PR should not be used without** #1180. Wanted to get the PR opened though in case anyone wants this feature urgently and can't wait until both PRs are merged. \n\nI have created a separate Google connector that uses a service account to fetch all groups that a user is a member of. \n\nI wasn't sure which documentation would need to be updated so I have added a document explaining the new parameters and how to set up the Google service account, but I reckon this will need extra documentation changes before it can be merged, let me know what it needs.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "dex", + "sandbox", + "security" + ], + "category": "security", + "cncfProjects": [ + "dex" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/dexidp/dex/pull/1180", + "sourceRepo": "dexidp/dex", + "reactions": 6, + "comments": 22 + }, + "security": { + "scannedAt": "2026-02-27T17:47:42.796Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/dex/dex-1185-fetch-groups-in-a-google-connector.json b/solutions/cncf-generated/dex/dex-1185-fetch-groups-in-a-google-connector.json new file mode 100644 index 00000000..1f33414b --- /dev/null +++ b/solutions/cncf-generated/dex/dex-1185-fetch-groups-in-a-google-connector.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + 
"exportedAt": "2026-02-27T17:47:38.598Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "dex: Fetch groups in a Google Connector", + "description": "Fixes #866 #1001 \n\n**This PR should not be used without** #1180. Wanted to get the PR opened though in case anyone wants this feature urgently and can't wait until both PRs are merged. \n\nI have created a separate Google connector that uses a service account to fetch all groups that a user is a member of. \n\nI wasn't sure which documentation would need to be updated so I have added a document explaining the new parameters and how to set up the Google service account, but I reckon this will need extra documentation changes before it can be merged, let me know what it needs.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Fixes: #863 \n\nI've read through the document provided in this [comment](https://github.com/coreos/dex/issues/863#issuecomment-294997858) by @ericchiang and implemented the changes he describes.\n\nI've been testing it with the memory storage and connecting to Google so far and everything seems to be working (Added lots of Prints while debugging). 
\n\nHave had two clients set up, logged the same user into A, then into B (updating the refresh token since A's is now invalid), then refreshed client A, which used the token retrieved by client B to refresh.\n\nI believe this should now allow the creation of a specific Google connector, in turn allowing someone to implement retrieval of groups using their Admin Directory API", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "dex", + "sandbox", + "security" + ], + "category": "security", + "cncfProjects": [ + "dex" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/dexidp/dex/pull/1185", + "sourceRepo": "dexidp/dex", + "reactions": 15, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:47:38.598Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/dex/dex-1784-pkce-implementation.json b/solutions/cncf-generated/dex/dex-1784-pkce-implementation.json new file mode 100644 index 00000000..6280f75f --- /dev/null +++ b/solutions/cncf-generated/dex/dex-1784-pkce-implementation.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:44.332Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "dex: PKCE implementation", + "description": "This pull request adds the PKCE (Proof Key for Code Exchange) implementation. 
([RFC 7636](https://tools.ietf.org/html/rfc7636)), Closes #1114.\n\nBased on the implementation of @Teeed (PR #1652)\n- add the remarks of @deric (return invalid_grand when wrong code_verifier)\n- fix the issue, @mfmarche found.\n- Enforce PKCE flow on /token when PKCE flow was started on /auth\n- made sure, the correct errors are returned\n- add Tests for the error cases\n- make sure, the client_secret is only omitted, when code_verifier is received on /token, and grant_type is correct.\n- add \"Authorization\" to allowed CORS headers\n\nI am lookin forward to your review.\n\n### Test PKCE\nYou should use a public client (without secret), so PKCE is not stopped by a missing client_secret\n```yaml\nstaticClients:\n- id: example-app\n name: 'Example App'\n public: true\n```\n### Note\n* Configuring redirectURIs does currently not work with public clients (see #1822) but public clients allow all localhost callback URIs (e.g. http://", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Part of the larger #1547\n\nThis PR adds the dex configuration, and CP Authentication redux logic", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "staticClients:\r\n- id: example-app\r\n name: 'Example App'\r\n public: true", + "4.4.1. Error Response\r\n If the server requires Proof Key for Code Exchange (PKCE) by OAuth\r\n public clients and the client does not send the \"code_challenge\" in\r\n the request, the authorization endpoint MUST return the authorization\r\n error response with the \"error\" value set to \"invalid_request\". 
The\r\n \"error_description\" or the response of \"error_uri\" SHOULD explain the\r\n nature of error, e.g., code challenge required.", + "staticClients:\r\n- id: example-app\r\n name: 'Example App'\r\n public: true" + ] + } + }, + "metadata": { + "tags": [ + "dex", + "sandbox", + "security" + ], + "category": "security", + "cncfProjects": [ + "dex" + ], + "targetResourceKinds": [ + "Secret" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/dexidp/dex/pull/1784", + "sourceRepo": "dexidp/dex", + "reactions": 4, + "comments": 16 + }, + "security": { + "scannedAt": "2026-02-27T17:47:44.332Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/dex/dex-1829-support-multiple-refresh-tokens-per-user.json b/solutions/cncf-generated/dex/dex-1829-support-multiple-refresh-tokens-per-user.json new file mode 100644 index 00000000..04f3cb54 --- /dev/null +++ b/solutions/cncf-generated/dex/dex-1829-support-multiple-refresh-tokens-per-user.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:39.468Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "dex: Support multiple refresh tokens per user.", + "description": "fixes #981\n\nThis PR introduces `enableMultiRefreshTokens` option into config to enable multi refresh tokens per user.\nIn our environment, each user uses multiple id tokens in laptops so this feature is very helpful.\n\n## Details\n\n- `enableMultiRefreshTokens` option defaults to false and in that case this PR does not change any behavior.\n- When `enableMultiRefreshTokens` is true, Dex skips to delete a refresh token in issuing an id token.\n- To minimize changes in storage layer, `ListRefresh` and `RevokeRefresh` gRPC API use `Storage.ListRefreshTokens` in `enableMultiRefreshTokens` mode. 
This can be heavy but num of refresh tokens not is expected very high (same as num of id tokens).\n- `storage.OfflineSessions.Refresh` contains the latest-issued refresh token.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "I appreciate your work @ryotarai and I would welcome \"multiple refresh token per user\" feature, but I have some problems with how this change is designed:\n\n1) Essentially the OfflineSessions.Refresh[clientID] now points to the last created refresh, but not necessarily last updated refresh.\nThis can be confusing as I would naturally expect OfflineSessions.Refresh[clientID].LastUsed to give me info when was the last time refresh was performed for this client.\n\nI would also expect OfflineSessions.Refresh[clientID] to store information about all issued refresh tokens. (I know this is much more challenging as would require schema changes and storage plugins update) \n\n2) Also with multiple tokens now the number of refresh tokens could grow indefinitely so it would be good to introduce some (potentially configurable) limit to this. But this raises question: what to do when limit is reached. Probably overwriting least recently used would be a good thing?\n\n3) Another point is this change potentially weakens security. Currently if user logs-in again their refresh token gets overwritten. So even if some old refresh token is leaked - it's most likely invalid at this point. With this feature on, it's more likely that the old refresh token can still be valid. Maybe refresh token expiry should be considered to mitigate this?", + "steps": [ + "Essentially the OfflineSessions.Refresh[clientID] now points to the last created refresh, but not necessarily last updated refresh.", + "Also with multiple tokens now the number of refresh tokens could grow indefinitely so it would be good to introduce some (potentially configurable) limit to this. But this raises question: what to do when limit is reached. 
Probably overwriting least recently used would be a good thing?", + "Another point is this change potentially weakens security. Currently if user logs-in again their refresh token gets overwritten. So even if some old refresh token is leaked - it's most likely invalid at this point. With this feature on, it's more likely that the old refresh token can still be valid. Maybe refresh token expiry should be considered to mitigate this?" + ] + } + }, + "metadata": { + "tags": [ + "dex", + "sandbox", + "security" + ], + "category": "security", + "cncfProjects": [ + "dex" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/dexidp/dex/pull/1829", + "sourceRepo": "dexidp/dex", + "reactions": 12, + "comments": 7 + }, + "security": { + "scannedAt": "2026-02-27T17:47:39.468Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/dex/dex-1846-feat-add-refresh-token-expiration-and-rotation-settings.json b/solutions/cncf-generated/dex/dex-1846-feat-add-refresh-token-expiration-and-rotation-settings.json new file mode 100644 index 00000000..e68f5ca0 --- /dev/null +++ b/solutions/cncf-generated/dex/dex-1846-feat-add-refresh-token-expiration-and-rotation-settings.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:35.670Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "dex: feat: Add refresh token expiration and rotation settings", + "description": "This PR adds various settings to adjust refresh token rotation and expiration. I will describe the purpose of each option below. Some of them required to improve security and suggested by specs. On the other hand, there are options aimed to improve UX in exchange for security.\n\n### Options\nCurrent refresh tokens issued by dex has no lifetime limit. 
Even with enabled rotation, it possible (for example, for device thieves) to use stolen token endlessly if the original user never logs in again. Options below allow users to make their authentication pipeline more securable.\n* **validForIfNotUsed** - it's recommended to invalidate refresh tokens if they are not used.\n* **absoluteLifetime** - a stricter variant of the previous option, forces users to reauthenticate and obtain a new refresh token no matter what.\n\nThe following options are aimed to improve user experience. [RFC6819](https://tools.ietf.org/html/rfc6819#section-5.2.2.3) mention, that users may encounter problems with every-reque", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "fixes #981\n\nThis PR introduces `enableMultiRefreshTokens` option into config to enable multi refresh tokens per user.\nIn our environment, each user uses multiple id tokens in laptops so this feature is very helpful.\n\n## Details\n\n- `enableMultiRefreshTokens` option defaults to false and in that case this PR does not change any behavior.\n- When `enableMultiRefreshTokens` is true, Dex skips to delete a refresh token in issuing an id token.\n- To minimize changes in storage layer, `ListRefresh` and `RevokeRefresh` gRPC API use `Storage.ListRefreshTokens` in `enableMultiRefreshTokens` mode. 
This can be heavy but num of refresh tokens not is expected very high (same as num of id tokens).\n- `storage.OfflineSessions.Refresh` contains the latest-issued refresh token.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "dex", + "sandbox", + "security" + ], + "category": "security", + "cncfProjects": [ + "dex" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/dexidp/dex/pull/1846", + "sourceRepo": "dexidp/dex", + "reactions": 27, + "comments": 8 + }, + "security": { + "scannedAt": "2026-02-27T17:47:35.671Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/dex/dex-2047-adjust-scopes-to-be-in-line-with-the-microsoft-identity-platform-v2.json b/solutions/cncf-generated/dex/dex-2047-adjust-scopes-to-be-in-line-with-the-microsoft-identity-platform-v2.json new file mode 100644 index 00000000..4199acbf --- /dev/null +++ b/solutions/cncf-generated/dex/dex-2047-adjust-scopes-to-be-in-line-with-the-microsoft-identity-platform-v2.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:47.671Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "dex: Adjust scopes to be in line with the Microsoft Identity Platform v2.", + "description": "#### Overview\n\nThis PR adds the openid scope necessary for the Microsoft Identity Platform v2 as well as changes the user and group scopes to the ones now supported on the Microsoft graph API. \nThis PR was created together with Microsoft backend engineers and fixes https://github.com/dexidp/dex/issues/1855\n\n#### What this PR does / why we need it\n\nThe current Microsoft connector just works. 
But if you enabled features like 2fa on your Microsoft Azure AD Application this is not enforced, because the scopes used trigger the Azure App in a different way than intended. \n\nSo, to be able to use 2fa on a Microsoft Azure AD App, you need those changes. \n\n#### Special notes for your reviewer\n\nI tested this change locally with group sync and refresh token. Same behaviour as before, except that policies like 2fa applied to the Azure AD App now are enforced by Microsoft. \n\n#### Does this PR introduce a user-facing change?\n\n```release-note\nNONE\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "#### Overview\n\nAdded Microsoft Identity Platform v2 specific scopes in order to also support conditional access policies, like MFA, TermsOfUse, etc. It does not change the behaviour of current implementation and works with or without conditional access policy configured.\n\nWe have this fix in production for five months without any issues.\n\n#### What this PR does / why we need it\n\nIn order to support enforced policies, like MFA, you need to use the Microsoft Identity Platform v2 with according scopes. The configuration on Dex-side remains the same and works as of today, with or without conditional access, groups and refresh tokens.\n\nSee also: https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-permissions-and-consent#the-default-scope\n\nCloses #1855\n\n#### Special notes for your reviewer\n\nThis PR is based on #2047 and contains the changes to the MS connector only. It has been tested on top of v2.35 and v2.36.\n\n#### Does this PR introduce a user-facing change?\n\n```release-note\nNONE\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "\r\n\r\n#### Overview\r\n\r\n\r\n\r\nAdded Microsoft Identity Platform v2 specific scopes in order to also support conditional access policies, like MFA, TermsOfUse, etc. 
It does not change the behaviour of current implementation and works with or without conditional access policy configured.\r\n\r\nWe have this fix in production for five months without any issues.\r\n\r\n#### What this PR does / why we need it\r\n\r\n\r\n\r\nIn order to support enforced policies, like MFA, you need to use the Microsoft Identity Platform v2 with according scopes. The configuration on Dex-side remains the same and works as of today, with or without conditional access, groups and refresh tokens.\r\n\r\nSee also: https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-permissions-and-consent#the-default-scope\r\n\r\nCloses #1855\r\n\r\n#### Special notes for your reviewer\r\n\r\nThis PR is based on #2047 and contains the changes to the MS connector only. It has been tested on top of v2.35 and v2.36.\r\n\r\n#### Does this PR introduce a user-facing change?\r\n\r\n" + ] + } + }, + "metadata": { + "tags": [ + "dex", + "sandbox", + "security", + "area-connector" + ], + "category": "security", + "cncfProjects": [ + "dex" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/dexidp/dex/pull/2047", + "sourceRepo": "dexidp/dex", + "reactions": 2, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:47:47.671Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/dex/dex-2352-use-gitlab-s-refresh-token-during-refresh.json b/solutions/cncf-generated/dex/dex-2352-use-gitlab-s-refresh-token-during-refresh.json new file mode 100644 index 00000000..39f3f13d --- /dev/null +++ b/solutions/cncf-generated/dex/dex-2352-use-gitlab-s-refresh-token-during-refresh.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:46.038Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "dex: Use GitLab's refresh_token during Refresh.", + "description": 
"#### Overview\n\nUse GitLab's `refresh_token` instead of `acces_token` during `connector.Refresh` when `offline_access` is specified.\n\n#### What this PR does / why we need it\n\nCloses #2316 \n\n#### Special notes for your reviewer\n\n#### Does this PR introduce a user-facing change?\n\n```release-note\nFix GitLab connector to use refresh_tokens with `offline_access`\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This PR avoids CVE-2022-2097/CVE-2022-30065. As of dex:v2.32.0 they now provide distroless images. Another alternative would be to upgrade to ```http://ghcr.io/dexidp/dex:v2.33.0``` although [this PR](https://github.com/dexidp/dex/pull/2352) may introduce some buggy behavior for our users. \n\nI don't see any binaries that argocd-dex depends on, using a distroless image in this case should be fine. \n\nSigned-off-by: Justin Marquis <34fathombelow@protonmail.com>\n\nNote on DCO:\n\nIf the DCO action in the integration test fails, one or more of your commits are not signed off. Please click on the *Details* link next to the DCO action for instructions on how to resolve this.\n\nChecklist:\n\n* [x] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.\n* [x] The title of the PR states what changed and the related issues number (used for the release note).\n* [ ] I've included \"Closes [ISSUE #]\" or \"Fixes [ISSUE #]\" in the description to automatically close the associated issue.\n* [ ] I've updated both the CLI and UI to expose my feature, or I plan to submit a second PR with them.\n* [ ] Does this PR require documentation updates?\n* [ ] I've updated documentation as required by this PR.\n* [ ] Optional. 
My organization is added to USERS.md.\n* [x] I have signed off all my commits as required by [DCO](https://github.com/argoproj/a", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "This PR avoids CVE-2022-2097/CVE-2022-30065. As of dex:v2.32.0 they now provide distroless images. Another alternative would be to upgrade to", + "{\"level\":\"error\",\"msg\":\"failed to refresh identity: gitlab: failed to get refresh token: oauth2: cannot fetch token: 400 Bad Request\\nResponse: {\\\"error\\\":\\\"invalid_grant\\\",\\\"error_description\\\":\\\"The provided authorization grant is invalid, expired, revoked, does not match the redirection URI used in the authorization request, or was issued to another client.\\\"}\",\"time\":\"2022-03-05T04:13:54Z\"}\r\n{\"level\":\"error\",\"msg\":\"failed to refresh identity: gitlab: failed to get refresh token: oauth2: cannot fetch token: 400 Bad Request\\nResponse: {\\\"error\\\":\\\"invalid_grant\\\",\\\"error_description\\\":\\\"The provided authorization grant is invalid, expired, revoked, does not match the redirection URI used in the authorization request, or was issued to another client.\\\"}\",\"time\":\"2022-03-05T04:13:54Z\"}\r\n{\"level\":\"error\",\"msg\":\"failed to refresh identity: gitlab: failed to get refresh token: oauth2: cannot fetch token: 400 Bad Request\\nResponse: {\\\"error\\\":\\\"invalid_grant\\\",\\\"error_description\\\":\\\"The provided authorization grant is invalid, expired, revoked, does not match the redirection URI used in the authorization request, or was issued to another client.\\\"}\",\"time\":\"2022-03-05T04:13:54Z\"}\r\n{\"level\":\"error\",\"msg\":\"failed to refresh identity: gitlab: failed to get refresh token: oauth2: cannot fetch token: 400 Bad Request\\nResponse: {\\\"error\\\":\\\"invalid_grant\\\",\\\"error_description\\\":\\\"The provided authorization grant is invalid, expired, revoked, does not match 
the redirection URI used in the authorization request, or was issued to another client.\\\"}\",\"time\":\"2022-03-05T04:13:54Z\"}", + "Started POST \"/oauth/token\" for **** at 2022-03-05 07:13:54 +0300\r\nStarted POST \"/oauth/token\" for **** at 2022-03-05 07:13:54 +0300\r\nStarted POST \"/oauth/token\" for **** at 2022-03-05 07:13:54 +0300\r\nStarted POST \"/oauth/token\" for **** at 2022-03-05 07:13:54 +0300\r\nProcessing by Oauth::TokensController#create as HTML\r\n Parameters: {\"grant_type\"=>\"refresh_token\", \"refresh_token\"=>\"[FILTERED]\"}\r\nProcessing by Oauth::TokensController#create as HTML\r\n Parameters: {\"grant_type\"=>\"refresh_token\", \"refresh_token\"=>\"[FILTERED]\"}\r\nProcessing by Oauth::TokensController#create as HTML\r\n Parameters: {\"grant_type\"=>\"refresh_token\", \"refresh_token\"=>\"[FILTERED]\"}\r\nProcessing by Oauth::TokensController#create as HTML\r\n Parameters: {\"grant_type\"=>\"refresh_token\", \"refresh_token\"=>\"[FILTERED]\"}\r\nStarted POST \"/oauth/token\" for 95.217.82.131 at 2022-03-05 07:13:54 +0300\r\nProcessing by Oauth::TokensController#create as HTML\r\n Parameters: {\"grant_type\"=>\"refresh_token\", \"refresh_token\"=>\"[FILTERED]\"}\r\nCompleted 400 Bad Request in 36ms (ActiveRecord: 14.7ms | Elasticsearch: 0.0ms | Allocations: 2815)\r\nCompleted 400 Bad Request in 35ms (ActiveRecord: 15.0ms | Elasticsearch: 0.0ms | Allocations: 3238)\r\nCompleted 400 Bad Request in 57ms (ActiveRecord: 29.3ms | Elasticsearch: 0.0ms | Allocations: 9069)\r\nCompleted 200 OK in 61ms (Views: 0.3ms | ActiveRecord: 12.8ms | Elasticsearch: 0.0ms | Allocations: 11997)\r\nCompleted 400 Bad Request in 47ms (Views: 0.3ms | ActiveRecord: 21.4ms | Elasticsearch: 0.0ms | Allocations: 8327)" + ] + } + }, + "metadata": { + "tags": [ + "dex", + "sandbox", + "security", + "area-connector", + "release-note-enhancement" + ], + "category": "security", + "cncfProjects": [ + "dex" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", 
+ "sourceIssue": "https://github.com/dexidp/dex/pull/2352", + "sourceRepo": "dexidp/dex", + "reactions": 3, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:47:46.038Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/dex/dex-2989-google-implement-groups-fetch-by-default-service-account-from-metadata-.json b/solutions/cncf-generated/dex/dex-2989-google-implement-groups-fetch-by-default-service-account-from-metadata-.json new file mode 100644 index 00000000..d3e1be59 --- /dev/null +++ b/solutions/cncf-generated/dex/dex-2989-google-implement-groups-fetch-by-default-service-account-from-metadata-.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:41.041Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "dex: Google: Implement groups fetch by default service account from metadata (support for GKE workload identity)", + "description": "#### Overview\nHello,\n\nThis pull request addresses the need to fetch groups using the default service account from metadata in the Dex Google Connector. 
It adds more robust support for Google Cloud Platform environments, particularly GKE Workload Identity, and increases the module's resilience and versatility.\n\n#### What this PR does / why we need it\n\nFix #2676\n\n#### Special notes for your reviewer\n\n#### Does this PR introduce a user-facing change?\nNo\n\n```release-note\nImplement groups fetch by default service account from metadata (support for GKE workload identity)\n```\n\nDocs PR: https://github.com/dexidp/website/pull/138", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## Documentation Update: Reflecting Changes from PRs [#2989](https://github.com/dexidp/dex/pull/2989) and [#2911](https://github.com/dexidp/dex/pull/2911)\n\nThis documentation update is intended to capture the important changes made in Pull Requests [#2989](https://github.com/dexidp/dex/pull/2989) and [#2911](https://github.com/dexidp/dex/pull/2911). Both PRs brought notable enhancements to the Dex project and it's essential to ensure our documentation reflects these updates.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "dex", + "sandbox", + "security", + "release-note-enhancement" + ], + "category": "security", + "cncfProjects": [ + "dex" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/dexidp/dex/pull/2989", + "sourceRepo": "dexidp/dex", + "reactions": 8, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:47:41.041Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/distribution/distribution-1956-untag-manifests-for-delete-api-calls-by-tag.json b/solutions/cncf-generated/distribution/distribution-1956-untag-manifests-for-delete-api-calls-by-tag.json new file mode 100644 index 00000000..3a03e00a --- /dev/null +++ 
b/solutions/cncf-generated/distribution/distribution-1956-untag-manifests-for-delete-api-calls-by-tag.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:59.729Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "distribution: Untag manifests for DELETE API calls by tag", + "description": "This is a proposed implementation for #1954 that would help close #1811 #1859\n\nDocumentation will have to be updated as well if this is an accepted API change.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@sergeyfd sorry I have no responded. I have the same feeling that I have expressed before, without the ability to lock this operation could be racy. Your deletion logic is correct though. As for the race, there are 3 approaches we could take....\n\n1. Don't worry about it, it is very unlikely a specific manifest digest is tagged within that deletion windows.\n2. Allow deletion calls to occur while the registry is in read only mode. This deletion will not have a negative on any running garbage collection.\n3. Wait until we have transactional metadata, still no timeframe on getting this merged in though.\n\nI think option 2 is interesting but don't think that is a simple change and may be a breach of contract. Users are already doing option 1 so merging this would probably just make that approach official.\n\nPing @stevvooe @aaronlehmann do we want this feature now or should we stick with our current solutions and wait until option 3 to merge something in.", + "steps": [ + "Don't worry about it, it is very unlikely a specific manifest digest is tagged within that deletion windows.", + "Allow deletion calls to occur while the registry is in read only mode. This deletion will not have a negative on any running garbage collection.", + "Wait until we have transactional metadata, still no timeframe on getting this merged in though." 
+ ] + } + }, + "metadata": { + "tags": [ + "distribution", + "sandbox", + "app-definition", + "status-1-design-review", + "status-needs-attention" + ], + "category": "workloads", + "cncfProjects": [ + "distribution" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/distribution/distribution/pull/1956", + "sourceRepo": "distribution/distribution", + "reactions": 3, + "comments": 34 + }, + "security": { + "scannedAt": "2026-02-27T17:47:59.729Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/distribution/distribution-2934-add-config-support-for-limiting-allowed-mediatypes.json b/solutions/cncf-generated/distribution/distribution-2934-add-config-support-for-limiting-allowed-mediatypes.json new file mode 100644 index 00000000..51bf03f3 --- /dev/null +++ b/solutions/cncf-generated/distribution/distribution-2934-add-config-support-for-limiting-allowed-mediatypes.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:55.125Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "distribution: Add config support for limiting allowed mediaTypes", + "description": "Fixes #2928 (please see issue for details).\n\nAdded docs and tests. Let me know if any changes required. 
Thank you!", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Adds page for projects or companies currently engaged in efforts to\nsupport OCI artifacts.\n\nSigned-off-by: hasheddan ", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "| Flag | Coverage Δ | |\n|---|---|---|\n| #linux | `60.68% <75.86%> (+0.23%)` | :arrow_up: |\n\n| [Impacted Files](https://codecov.io/gh/docker/distribution/pull/2934?src=pr&el=tree) | Coverage Δ | |\n|---|---|---|\n| [configuration/configuration.go](https://codecov.io/gh/docker/distribution/pull/2934/diff?src=pr&el=tree#diff-Y29uZmlndXJhdGlvbi9jb25maWd1cmF0aW9uLmdv) | `65.59% <ø> (ø)` | :arrow_up: |\n| [registry/api/v2/descriptors.go](https://codecov.io/gh/docker/distribution/pull/2934/diff?src=pr&el=tree#diff-cmVnaXN0cnkvYXBpL3YyL2Rlc2NyaXB0b3JzLmdv) | `100% <ø> (ø)` | :arrow_up: |\n| [registry/handlers/manifests.go](https://codecov.io/gh/docker/distribution/pull/2934/diff?src=pr&el=tree#diff-cmVnaXN0cnkvaGFuZGxlcnMvbWFuaWZlc3RzLmdv) | `52.94% <0%> (-0.71%)` | :arrow_down: |\n| [registry/storage/ocimanifesthandler.go](https://codecov.io/gh/docker/distribution/pull/2934/diff?src=pr&el=tree#diff-cmVnaXN0cnkvc3RvcmFnZS9vY2ltYW5pZmVzdGhhbmRsZXIuZ28=) | `70.31% <100%> (+1.46%)` | :arrow_up: |\n| [registry/storage/registry.go](https://codecov.io/gh/docker/distribution/pull/2934/diff?src=pr&el=tree#diff-cmVnaXN0cnkvc3RvcmFnZS9yZWdpc3RyeS5nbw==) | `89.56% <100%> (+1.28%)` | :arrow_up: |\n| [registry/storage/schema2manifesthandler.go](https://codecov.io/gh/docker/distribution/pull/2934/diff?src=pr&el=tree#diff-cmVnaXN0cnkvc3RvcmFnZS9zY2hlbWEybWFuaWZlc3RoYW5kbGVyLmdv) | `67.18% <100%> (+1.61%)` | :arrow_up: |\n| [registry/storage/mediatype.go](https://codecov.io/gh/docker/distribution/pull/2934/diff?src=pr&el=tree#diff-cmVnaXN0cnkvc3RvcmFnZS9tZWRpYXR5cGUuZ28=) | `100% <100%> (ø)` | |\n| 
[registry/handlers/app.go](https://codecov.io/gh/docker/distribution/pull/2934/diff?src=pr&el=tree#diff-cmVnaXN0cnkvaGFuZGxlcnMvYXBwLmdv) | `49.48% <53.12%> (+1.32%)` | :arrow_up: |\n\n------\n\n[Continue to review full report at Codecov](https://codecov.io/gh/docker/distribution/pull/2934?src=pr&el=continue).\n> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)\n> `Δ = absolute (impact)`, `ø = not affected`, `? = missing data`\n> Powered by [Codecov](https://codecov.io/gh/docker/distribution/pull/2934?src=pr&el=footer). Last update [3226863...f0489b7](https://codecov.io/gh/docker/distribution/pull/2934?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).\n\nHi, any update?\n@jdolitsky thx for this PR, lots to review, seems well thought out :-) @dmcgowan \n@mikebrow yep, definitely! Let me know if any changes required, happy to do so\nNot a maintainer here, so just a drive-by observation 🤗\r\n\r\n\r\nIt looks like there's a subtle, but possibly important difference between and URL allow/deny rules;\r\n\r\nFor URLs:\r\n\r\n> If allow is `unset`, pushing a manifest containing URLs fails.\r\n\r\nFor mediaTypes:\r\n\r\n> If allow is unset, pushing a manifest containing any mediaType in the config succeeds (all config mediaTypes allowed).\r\n\r\n\r\nI assume this is for backward compatibility, but as a result won't allow the option of configuring a _whitelist_ (deny \"everything\", and _allow_ a specific list of media types)\r\n\r\n- Would that be an important use-case?\r\n- To facilitate that use-case, would perhaps making a distinction between \"no set\" and \"set, but empty list\" work?\r\n\r\nAnother thing I looked at was \"regular expressions\" in the initial implementation. I'm not a maintainer for this project, but I tend to prefer keeping things simple (in this case; just a list of mediatypes), unless it's critical to make the feature useful. 
(Usually, it's easier to _add_ functionality (eg separate `allowMatch` rule) than to _remove_ once added).\r\n\r\nI can see it being consistent with existing allow/deny rules, so that may be the reason for using regular expressions; just curious;\r\n\r\n- If there's actual use-cases where regular expressions are _required_ for this functionality (the apples/bananas example is fictive, so doesn't give a clue for a real-life scenario), or\r\n- if regular expressions add _significant_ benefits for the feature\r\n\r\n\n@caervs -\r\n\r\n> Do you think it would make sense as a follow-up to this PR for the service to block blob uploads for media types that aren't valid as configs or layers?\r\n\r\nyes, let's.\r\n\r\nin terms of the stuff in handlers/app.go - this was mostly copying what had been done for the other section of the config. will take care of the docs fix.\r\n\r\n---\r\n\r\n@thaJeztah - \r\n\r\nI was troubled by trying to apply regex to the media types.. I would honestly rather have just a static list as a distribution deploy-er. I'm not sure if its better to follow the convention in the config or do something custom. what do you think? 
@caervs?\r\n\r\nsomething maybe like" + ] + } + }, + "metadata": { + "tags": [ + "distribution", + "sandbox", + "app-definition", + "area-config" + ], + "category": "workloads", + "cncfProjects": [ + "distribution" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/distribution/distribution/pull/2934", + "sourceRepo": "distribution/distribution", + "reactions": 9, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:47:55.125Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/distribution/distribution-2973-support-ecs-taskrole-in-s3-storage-driver.json b/solutions/cncf-generated/distribution/distribution-2973-support-ecs-taskrole-in-s3-storage-driver.json new file mode 100644 index 00000000..3a01498d --- /dev/null +++ b/solutions/cncf-generated/distribution/distribution-2973-support-ecs-taskrole-in-s3-storage-driver.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:56.706Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "distribution: Support ECS TaskRole in S3 storage driver", + "description": "Instead of constructing the list of credential providers manually, if we use the default list we can take advantage of the AWS SDK checking the environment and returning either the EC2RoleProvider or the generic HTTP credentials provider, configured to use the ECS credentials endpoint.\n\nAlso, use the `defaults.Config()` function instead of `aws.NewConfig()`, as this results in an initialised HTTP client which prevents a fatal error when retrieving credentials from the ECS credentials endpoint.\n\nFixes #2960\n\nSigned-off-by: Andrew Bulford ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This PR reopens #2973 and fixes still hardcoded list of credential providers:\n\n- `StaticProvider`\n- 
`EnvVarProvider`\n- `SharedCredentialProvider`\n- `EC2RoleProvider`\n\nInstead it relies on SDK defaults when creating a new AWS Config and AWS Session. And only overrides them with `StaticProvider` if `accesskey` and `secretkey` are provided via `config.yml`:\n\n```yaml\nstorage:\n s3:\n accesskey: ABCDEFG123HY4LWA5TOG\n secretkey: AB1C+D2EF/Ge3cA/vQbUfMVpuDImUZsxkpCQDUy0\n region: us-east-1\n ...\n``` \n\nAs the result, it uses AssumeRoleWithWebIdentity credential provider (in [`session.resolveCredentials`](https://github.com/aws/aws-sdk-go/blob/95871fc3b42a8910f81ec01c74975293dafced97/aws/session/session.go#L630)) that resolves assigned IAM role to K8s Service Accounts via OIDC token. Please see [here](https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/) for more details.\n\nThis changed have been tested in the following scenario to grant access to AWS S3 Bucket:\n\n- IAM Role for Service Accounts that're assigned to Pods on EKS cluster\n- IAM Role for EC2 instances\n- Environment Variables: `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` on a local machine\n\nUpstreamed from Docker Hub", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "storage:\r\n s3:\r\n accesskey: ABCDEFG123HY4LWA5TOG\r\n secretkey: AB1C+D2EF/Ge3cA/vQbUfMVpuDImUZsxkpCQDUy0\r\n region: us-east-1\r\n ..." 
+ ] + } + }, + "metadata": { + "tags": [ + "distribution", + "sandbox", + "app-definition", + "area-storage-s3" + ], + "category": "workloads", + "cncfProjects": [ + "distribution" + ], + "targetResourceKinds": [ + "Role" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/distribution/distribution/pull/2973", + "sourceRepo": "distribution/distribution", + "reactions": 5, + "comments": 16 + }, + "security": { + "scannedAt": "2026-02-27T17:47:56.706Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/distribution/distribution-3124-updating-to-allow-irsa-capability.json b/solutions/cncf-generated/distribution/distribution-3124-updating-to-allow-irsa-capability.json new file mode 100644 index 00000000..a524c80f --- /dev/null +++ b/solutions/cncf-generated/distribution/distribution-3124-updating-to-allow-irsa-capability.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:49.975Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "distribution: updating to allow IRSA capability ", + "description": "- bumping the aws-sdk-go package to v1.29.22\n- Fixes #3097", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Closing as https://github.com/distribution/distribution/commit/e1464fd3172be169bfaf5f2f16479568c7f0f024 was merged in and updates to a higher version of AWS SDK.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "distribution", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "distribution" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/distribution/distribution/pull/3124", + "sourceRepo": "distribution/distribution", + "reactions": 17, + "comments": 3 + }, + 
"security": { + "scannedAt": "2026-02-27T17:47:49.975Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/distribution/distribution-3138-chore-update-azure-go-autorest-dependencies.json b/solutions/cncf-generated/distribution/distribution-3138-chore-update-azure-go-autorest-dependencies.json new file mode 100644 index 00000000..03d19a01 --- /dev/null +++ b/solutions/cncf-generated/distribution/distribution-3138-chore-update-azure-go-autorest-dependencies.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:02.629Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "distribution: chore: update azure go-autorest dependencies", + "description": "This updates the go-autorest related dependencies to a recent version which will fix the ambiguous module error.\n\nfixes #3137", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "github.com/dgrijalva/jwt-go is unmaintained and vulnerable to\nCVE-2020-26160. 
Updating github.com/Azure/go-autorest removes\nthe dependency.\n\nSigned-off-by: Bracken Dawson \n\nfixes #3361", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "../../../go/pkg/mod/sigs.k8s.io/cluster-api-provider-azure@v0.4.0/cloud/errors.go:20:2: ambiguous import: found package github.com/Azure/go-autorest/autorest in multiple modules:\r\n\tgithub.com/Azure/go-autorest v10.8.1+incompatible (/Users/davidjustice/go/pkg/mod/github.com/!azure/go-autorest@v10.8.1+incompatible/autorest)\r\n\tgithub.com/Azure/go-autorest/autorest v0.10.0 (/Users/davidjustice/go/pkg/mod/github.com/!azure/go-autorest/autorest@v0.10.0)" + ] + } + }, + "metadata": { + "tags": [ + "distribution", + "sandbox", + "app-definition", + "area-vendor" + ], + "category": "workloads", + "cncfProjects": [ + "distribution" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/distribution/distribution/pull/3138", + "sourceRepo": "distribution/distribution", + "reactions": 2, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:48:02.629Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/distribution/distribution-3536-add-option-to-enable-sparse-indexes.json b/solutions/cncf-generated/distribution/distribution-3536-add-option-to-enable-sparse-indexes.json new file mode 100644 index 00000000..297ccb04 --- /dev/null +++ b/solutions/cncf-generated/distribution/distribution-3536-add-option-to-enable-sparse-indexes.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:53.512Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "distribution: Add option to enable sparse indexes", + "description": "Enable configuration options that can selectively disable validation that dependencies exist within the 
registry before the image index is uploaded.\n\nThis enables sparse indexes, where a registry holds a manifest index that could be signed (so the digest must not change) but does not hold every referenced image in the index. The use case for this is when a registry mirror does not need to mirror all platforms, but does need to maintain the digests of all manifests either because they are signed or because they are pulled by digest.\n\nThe registry administrator can also select specific image architectures that must exist in the registry, enabling a registry operator to select only the platforms they care about and ensure all image indexes uploaded to the registry are valid for those platforms.\n\nCloses #3628", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "When inspecting an image index where the platform image for the current\narchitecture is missing, be clearer in the error message about what is\nwrong.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "| [Impacted Files](https://codecov.io/gh/distribution/distribution/pull/3536?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None) | Coverage Δ | |\n|---|---|---|\n| [registry/handlers/app.go](https://codecov.io/gh/distribution/distribution/pull/3536/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None#diff-cmVnaXN0cnkvaGFuZGxlcnMvYXBwLmdv) | `47.61% <42.85%> (-0.74%)` | :arrow_down: |\n| [configuration/configuration.go](https://codecov.io/gh/distribution/distribution/pull/3536/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None#diff-Y29uZmlndXJhdGlvbi9jb25maWd1cmF0aW9uLmdv) | `64.15% <61.53%> (-0.24%)` | :arrow_down: |\n| 
[registry/storage/manifestlisthandler.go](https://codecov.io/gh/distribution/distribution/pull/3536/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None#diff-cmVnaXN0cnkvc3RvcmFnZS9tYW5pZmVzdGxpc3RoYW5kbGVyLmdv) | `68.57% <100.00%> (+25.71%)` | :arrow_up: |\n| [registry/storage/registry.go](https://codecov.io/gh/distribution/distribution/pull/3536/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None#diff-cmVnaXN0cnkvc3RvcmFnZS9yZWdpc3RyeS5nbw==) | `89.47% <100.00%> (+0.79%)` | :arrow_up: |\n\nHelp us with your feedback. Take ten seconds to tell us [how you rate us](https://about.codecov.io/nps?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None). Have a feature suggestion? [Share it here.](https://app.codecov.io/gh/feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None)\n\nI thought I'd look at the client output for pulling a sparse manifest list where the platform image is missing. I copied the latest alpine index and s390x image to my registry, leaving out the other platforms, and am now attempting to use the manifest list on amd64.\r\n\r\nOverall, everything fails in sensible ways but the error messages could be improved in the clients.\r\n\r\n
\r\nskopeo inspect - error message could be clearer but you can tell its looking for the \"target platform\"", + "
\r\n\r\n
\r\nskopeo copy - It will fail on the first missing image when trying to copy them all, would be better to copy those that exist. Doesn't mention platform when pulling based on system, which could be improved.", + "
\r\n\r\n
\r\ndocker pull - error is different, but doens't mention platform or a manifest list." + ] + } + }, + "metadata": { + "tags": [ + "distribution", + "sandbox", + "app-definition", + "area-storage", + "area-config", + "area-docs", + "area-api" + ], + "category": "workloads", + "cncfProjects": [ + "distribution" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/distribution/distribution/pull/3536", + "sourceRepo": "distribution/distribution", + "reactions": 10, + "comments": 34 + }, + "security": { + "scannedAt": "2026-02-27T17:47:53.512Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/distribution/distribution-3701-otel-tracing-initial-opentelemetry-support.json b/solutions/cncf-generated/distribution/distribution-3701-otel-tracing-initial-opentelemetry-support.json new file mode 100644 index 00000000..7eeb0f9a --- /dev/null +++ b/solutions/cncf-generated/distribution/distribution-3701-otel-tracing-initial-opentelemetry-support.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:57.708Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "distribution: [otel-tracing] Initial opentelemetry support", + "description": "- Closes https://github.com/distribution/distribution/issues/3451\n\nLocal Repose workflows result : \n\"image\"", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "I'm aware of this. 
It's on my list to review indeed.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "distribution", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "distribution" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/distribution/distribution/pull/3701", + "sourceRepo": "distribution/distribution", + "reactions": 5, + "comments": 16 + }, + "security": { + "scannedAt": "2026-02-27T17:47:57.708Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/distribution/distribution-3822-fix-garbage-collect-delete-untagged-removes-multi-arch-manifes.json b/solutions/cncf-generated/distribution/distribution-3822-fix-garbage-collect-delete-untagged-removes-multi-arch-manifes.json new file mode 100644 index 00000000..e9214554 --- /dev/null +++ b/solutions/cncf-generated/distribution/distribution-3822-fix-garbage-collect-delete-untagged-removes-multi-arch-manifes.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:58.796Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "distribution: fix garbage-collect --delete-untagged removes multi-arch manifests #3178", + "description": "resolve #3178", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@readerx thanks for the PR, please add a few tests in [`registry/storage/garbagecollect_test.go`](https://github.com/distribution/distribution/blob/main/registry/storage/garbagecollect_test.go).\n\nThese lists should cover:\n - a tagged manifestlist with untagged manifest references having the manifestlist and references still present after garbage collection\n - an untagged manifestlist with untagged references having the manifestlist and references removed after 
garbage collection\n - an untagged manifestlist with tagged references having the manifestlist removed, but the references still present after garbage collection\n - a tagged manifestlist with a deleted reference — garbage collection should continue without error here", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "| [Impacted Files](https://codecov.io/gh/distribution/distribution/pull/3822?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None) | Coverage Δ | |\n|---|---|---|\n| [registry/storage/garbagecollect.go](https://codecov.io/gh/distribution/distribution/pull/3822?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None#diff-cmVnaXN0cnkvc3RvcmFnZS9nYXJiYWdlY29sbGVjdC5nbw==) | `66.14% <77.55%> (+7.19%)` | :arrow_up: |\n| [registry/storage/vacuum.go](https://codecov.io/gh/distribution/distribution/pull/3822?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None#diff-cmVnaXN0cnkvc3RvcmFnZS92YWN1dW0uZ28=) | `28.81% <0.00%> (-10.17%)` | :arrow_down: |\n| [manifest/schema1/sign.go](https://codecov.io/gh/distribution/distribution/pull/3822?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None#diff-bWFuaWZlc3Qvc2NoZW1hMS9zaWduLmdv) | `21.42% <0.00%> (ø)` | |\n| [manifest/schema1/verify.go](https://codecov.io/gh/distribution/distribution/pull/3822?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None#diff-bWFuaWZlc3Qvc2NoZW1hMS92ZXJpZnkuZ28=) | `23.07% <0.00%> (ø)` | |\n| 
[manifest/schema1/manifest.go](https://codecov.io/gh/distribution/distribution/pull/3822?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None#diff-bWFuaWZlc3Qvc2NoZW1hMS9tYW5pZmVzdC5nbw==) | `33.82% <0.00%> (ø)` | |\n| [configuration/configuration.go](https://codecov.io/gh/distribution/distribution/pull/3822?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None#diff-Y29uZmlndXJhdGlvbi9jb25maWd1cmF0aW9uLmdv) | `64.38% <0.00%> (ø)` | |\n| [manifest/schema1/config\\_builder.go](https://codecov.io/gh/distribution/distribution/pull/3822?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None#diff-bWFuaWZlc3Qvc2NoZW1hMS9jb25maWdfYnVpbGRlci5nbw==) | `71.65% <0.00%> (ø)` | |\n| [manifest/schema1/reference\\_builder.go](https://codecov.io/gh/distribution/distribution/pull/3822?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None#diff-bWFuaWZlc3Qvc2NoZW1hMS9yZWZlcmVuY2VfYnVpbGRlci5nbw==) | `94.00% <0.00%> (ø)` | |\n| [registry/handlers/app.go](https://codecov.io/gh/distribution/distribution/pull/3822?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None#diff-cmVnaXN0cnkvaGFuZGxlcnMvYXBwLmdv) | `47.20% <0.00%> (+0.26%)` | :arrow_up: |\n\nHelp us with your feedback. Take ten seconds to tell us [how you rate us](https://about.codecov.io/nps?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None). Have a feature suggestion? [Share it here.](https://app.codecov.io/gh/feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None)\n\n
\n\n[:umbrella: View full report at Codecov](https://codecov.io/gh/distribution/distribution/pull/3822?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None). \n:loudspeaker: Do you have feedback about the report comment? [Let us know in this issue](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None).\n\n@deleteriousEffect Is there anything else that needs to be modified? \n@readerx sorry, we're waiting on another maintainer to review, we need two approvals.\n> @readerx sorry, we're waiting on another maintainer to review, we need two approvals.\r\n\r\nOk, I see. Thank you for your reply\nHaven't done a review, but we probably want to clean up some of the commits before merging (at least I see there's some fix up commits in the PR)\n> Haven't done a review, but we probably want to clean up some of the commits before merging (at least I see there's some fix up commits in the PR)\r\n\r\nDo I need to rebase some submit?\n> Haven't done a review, but we probably want to clean up some of the commits before merging (at least I see there's some fix up commits in the PR)\r\n\r\n@thaJeztah \r\n\r\nI have merged the commits\r\n\nI'm having the same issue on my local registry.\r\n\r\nJust built and tested @readerx version and from an user point of view it works as expected.\r\n\r\nTested:\r\n1. It doesn't purge multi-arch layers\r\n2. It does correctly purge unreferenced layers when an image is updated\r\n\r\n\r\n(1) was tested with the following snippet, which fails on master but is ok on this branch:", + "After running this patch for some time, we've run into the same problem in some occasions. The garbage collector fails trying to delete non existing files:", + "Any hint on where the problem could be would be appreciated. 
\r\n\r\nTIA\n> After running this patch for some time, we've run into the same problem in some occasions. The garbage collector fails trying to delete non existing files:\r\n> \r\n>" + ] + } + }, + "metadata": { + "tags": [ + "distribution", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "distribution" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/distribution/distribution/pull/3822", + "sourceRepo": "distribution/distribution", + "reactions": 4, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:47:58.796Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/distribution/distribution-3827-concurrent-lookup-in-tagstore.json b/solutions/cncf-generated/distribution/distribution-3827-concurrent-lookup-in-tagstore.json new file mode 100644 index 00000000..3398c497 --- /dev/null +++ b/solutions/cncf-generated/distribution/distribution-3827-concurrent-lookup-in-tagstore.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:00.974Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "distribution: Concurrent Lookup in TagStore", + "description": "Based on https://github.com/distribution/distribution/pull/3589 (boldly stealed the tests)\nChannel aware limiter approach\nFixes https://github.com/distribution/distribution/issues/3525 \nTested on 2.6 TiB (~160k blobs and manifests eligible for deletion) repo cleaned down to 500GiB less than 24h (Harbor project)", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@Antiarchitect can you please fix the CI. 
Seems like it doesnt pass the linter check.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "distribution", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "distribution" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/distribution/distribution/pull/3827", + "sourceRepo": "distribution/distribution", + "reactions": 3, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:48:00.974Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/distribution/distribution-4285-fix-garbage-collect-delete-untagged-to-handle-schema-2-manifes.json b/solutions/cncf-generated/distribution/distribution-4285-fix-garbage-collect-delete-untagged-to-handle-schema-2-manifes.json new file mode 100644 index 00000000..9ce55396 --- /dev/null +++ b/solutions/cncf-generated/distribution/distribution-4285-fix-garbage-collect-delete-untagged-to-handle-schema-2-manifes.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:51.720Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "distribution: Fix garbage-collect --delete-untagged to handle schema 2 manifest list and OCI image index", + "description": "resolves #3178", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "3.0.0-alpha1 with https://github.com/distribution/distribution/pull/4285 applied", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "distribution", + "sandbox", + "app-definition", + "area-storage", + "area-ci" + ], + "category": "workloads", + "cncfProjects": [ + "distribution" + ], + "targetResourceKinds": [], + "difficulty": 
"intermediate", + "sourceIssue": "https://github.com/distribution/distribution/pull/4285", + "sourceRepo": "distribution/distribution", + "reactions": 11, + "comments": 22 + }, + "security": { + "scannedAt": "2026-02-27T17:47:51.721Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/envoy/envoy-11007-http-local-reply-mapper.json b/solutions/cncf-generated/envoy/envoy-11007-http-local-reply-mapper.json new file mode 100644 index 00000000..ce58d43c --- /dev/null +++ b/solutions/cncf-generated/envoy/envoy-11007-http-local-reply-mapper.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:19.726Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "envoy: http: local reply mapper", + "description": "This is revive of https://github.com/envoyproxy/envoy/pull/8921\n\nDescription: \n* Allows to create custom mappers of response code based on access_log filters. \n* Allows to map error response to custom in Text or Json format.\n\nRisk Level: Low\nTesting: unit test and integration test.\nDocs Changes: yes\nRelease Notes: \nFixes https://github.com/envoyproxy/envoy/issues/7537\nFollow up https://github.com/envoyproxy/envoy/pull/8126", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Signed-off-by: Wayne Zhang \n\naccess_log_format config could be used by LocalReplyConfig. 
\n\n1) Named it as SubstitutionFormatString,\n2) Moved it from `envoy/extensions/access_logger/file` into `/envoy/config/core`\n2) Added substitution_format_string.{cc, h} in `source/common/common`\n\nRequired by: https://github.com/envoyproxy/envoy/pull/11007\n\nRisk Level: None\nTesting: Unit test\nDocs Changes: Noe\nRelease Notes: Noe", + "steps": [ + "Named it as SubstitutionFormatString,", + "Moved it from `envoy/extensions/access_logger/file` into `/envoy/config/core`", + "Added substitution_format_string.{cc, h} in `source/common/common`" + ] + } + }, + "metadata": { + "tags": [ + "envoy", + "graduated", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "envoy" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/envoyproxy/envoy/pull/11007", + "sourceRepo": "envoyproxy/envoy", + "reactions": 3, + "comments": 26 + }, + "security": { + "scannedAt": "2026-02-27T17:46:19.726Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/envoy/envoy-12998-compression-add-brotli-compressor-and-decompressor.json b/solutions/cncf-generated/envoy/envoy-12998-compression-add-brotli-compressor-and-decompressor.json new file mode 100644 index 00000000..f8c4cb12 --- /dev/null +++ b/solutions/cncf-generated/envoy/envoy-12998-compression-add-brotli-compressor-and-decompressor.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:13.306Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "envoy: compression: add brotli compressor and decompressor", + "description": "Commit Message: compression: add brotli compressor and decompressor\nAdditional Description: Add new brotli compression extensions in addition to gzip.\nRisk Level: Low, no existing functionality is touched\nTesting: uni tests, manual tests with curl.\nDocs Changes: updated docs for 
compression and decompression HTTP filters to refer the new available encoder/decoder.\nRelease Notes: updated current.rst\nFixes #4429\n\nThe PR adds a new dependency on https://github.com/google/brotli. Here's the current criteria answers:\n\n| Criteria | Answer |\n|---------|---------|\n| Cloud Native Computing Foundation (CNCF) approved license | MIT |\n| Dependencies must not substantially increase the binary size unless they are optional | brotli's binary size built with `-c opt` is 752K |\n| No duplication of existing dependencies | no other dep provides Brotli |\n| Hosted on a git repository and the archive fetch must directly reference this repository. | https://github.com/google/brotli |\n| CVE history appears re", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/retest", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "envoy", + "graduated", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "envoy" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/envoyproxy/envoy/pull/12998", + "sourceRepo": "envoyproxy/envoy", + "reactions": 4, + "comments": 58 + }, + "security": { + "scannedAt": "2026-02-27T17:46:13.306Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/envoy/envoy-13176-support-slow-start-mode-in-envoy.json b/solutions/cncf-generated/envoy/envoy-13176-support-slow-start-mode-in-envoy.json new file mode 100644 index 00000000..41fa884b --- /dev/null +++ b/solutions/cncf-generated/envoy/envoy-13176-support-slow-start-mode-in-envoy.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:05.245Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "envoy: Support slow Start mode in Envoy", + 
"description": "Signed-off-by: Kateryna Nezdolii \n\nSupport progressive traffic increase in Envoy, implementation is according to design doc: https://docs.google.com/document/d/1NiG1X0gbfFChjl1aL-EE1hdfYxKErjJ2688wJZaj5a0/edit\n\nAdditional Description: Please refer to RFC\nRisk Level: Medium\nTesting: Done\nDocs Changes: Done\nRelease Notes: Done\nFixes #11050", + "type": "analyze", + "status": "completed", + "resolution": { + "summary": "Signed-off-by: Kateryna Nezdolii \n\nDescription: Introducing \"host creation time\" field into host description. This change is prerequisite for supporting slow start in Envoy (https://github.com/envoyproxy/envoy/pull/13176) and as it touches lots of code, it was decided to factor it out into dedicated PR.\nRisk Level: Medium\nTesting: In progress\nDocs Changes: NA", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "if (host->health() == Upstream::Host::Health::Healthy &&\r\n !host->healthFlagGet(Host::HealthFlag::PENDING_ACTIVE_HC))", + "new_weight = weight * time_bias * (time_since_start / slow_start_time)", + "new_weight = weight * time_bias * (time_since_start / slow_start_time) ^ (1 / aggression)" + ] + } + }, + "metadata": { + "tags": [ + "envoy", + "graduated", + "networking", + "deps" + ], + "category": "networking", + "cncfProjects": [ + "envoy" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/envoyproxy/envoy/pull/13176", + "sourceRepo": "envoyproxy/envoy", + "reactions": 11, + "comments": 61 + }, + "security": { + "scannedAt": "2026-02-27T17:46:05.245Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/envoy/envoy-14168-oauth2-filter-make-oauth-scopes-configurable.json b/solutions/cncf-generated/envoy/envoy-14168-oauth2-filter-make-oauth-scopes-configurable.json new file mode 100644 index 00000000..91b25257 --- 
/dev/null +++ b/solutions/cncf-generated/envoy/envoy-14168-oauth2-filter-make-oauth-scopes-configurable.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:12.109Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "envoy: oauth2 filter: Make OAuth scopes configurable. ", + "description": "Commit Message: Makes OAuth scopes configurable.\nNew optional parameter 'auth_scopes' added to the filter. The default value is 'user' (if not provided) to avoid breaking changes to users updating to the latest version.\n\nAdditional Description: Added log line to help debugging.\nRisk Level: Low\nTesting: Unit tests updated to match and cover the new parameter. Locally tested the generated docker image.\nDocs Changes: Added the new parameter to the docs\nRelease Notes: Updated current.rst file\nPlatform Specific Features:\nFixes #13766\nReplaces #14034", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Hi @andreyprezotto, welcome and thank you for your contribution.\n\nWe will try to review your Pull Request as quickly as possible.\n\nIn the meantime, please take a look at the [contribution guidelines](https://github.com/envoyproxy/envoy/blob/master/CONTRIBUTING.md) if you have not done so already.\n\n
\n\t:cat:\n\nCaused by: https://github.com/envoyproxy/envoy/pull/14168 was opened by andreyprezotto.\n\nsee: [more](https://github.com/envoyproxy/envoy/pull/14168), [trace](https://prod.repokitteh.app/traces/ui/envoyproxy/envoy/2c9446f0-2e92-11eb-8ff6-79e1931c54af).\n
", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "envoy", + "graduated", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "envoy" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/envoyproxy/envoy/pull/14168", + "sourceRepo": "envoyproxy/envoy", + "reactions": 4, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:46:12.109Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/envoy/envoy-14884-tls-implement-spiffe-certificate-validator-for-independent-multiple-.json b/solutions/cncf-generated/envoy/envoy-14884-tls-implement-spiffe-certificate-validator-for-independent-multiple-.json new file mode 100644 index 00000000..34115e6c --- /dev/null +++ b/solutions/cncf-generated/envoy/envoy-14884-tls-implement-spiffe-certificate-validator-for-independent-multiple-.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:11.216Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "envoy: tls: implement SPIFFE Certificate Validator for independent multiple trust domain support", + "description": "Signed-off-by: Takeshi Yoneda \n\nThis is the sequel to the previous refactoring PR: https://github.com/envoyproxy/envoy/pull/14757\n\nCommit Message: tls: implement SPIFFE Certificate Validator for independent multiple trust domain support.\nAdditional Description: Adds the extension point for certificate validations, and its first implementation for SPIFFE multi trust domain support in a single listener or cluster. Resolves https://github.com/envoyproxy/envoy/issues/14614 and https://github.com/envoyproxy/envoy/issues/9284. 
\nRisk Level: low (only adding the new extension point and one implementation for it)\nTesting: unit tests and integration tests.\nDocs Changes: \nRelease Notes: tls: implement SPIFFE Certificate Validator for independent multiple trust domain support.\n\ncc @lizan", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/retest", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "envoy", + "graduated", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "envoy" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/envoyproxy/envoy/pull/14884", + "sourceRepo": "envoyproxy/envoy", + "reactions": 4, + "comments": 30 + }, + "security": { + "scannedAt": "2026-02-27T17:46:11.216Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/envoy/envoy-15619-http-add-http-1-1-case-preservation.json b/solutions/cncf-generated/envoy/envoy-15619-http-add-http-1-1-case-preservation.json new file mode 100644 index 00000000..f21400b6 --- /dev/null +++ b/solutions/cncf-generated/envoy/envoy-15619-http-add-http-1-1-case-preservation.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:14.425Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "envoy: http: add HTTP/1.1 case preservation", + "description": "1) Add new stateful header formatter extension point\n2) Add preserve case formatter extension\n\nFixes https://github.com/envoyproxy/envoy/issues/14363\n\nRisk Level: Low\nTesting: New integration test\nDocs Changes: Added\nRelease Notes: Added\nPlatform Specific Features: N/A", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@asraa @alyssawilk @jmarantz @snowp PTAL. 
I'm pretty happy with how this turned out. It's quite clean and the perf overhead when not in use should be negligible.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "envoy", + "graduated", + "networking", + "api" + ], + "category": "networking", + "cncfProjects": [ + "envoy" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/envoyproxy/envoy/pull/15619", + "sourceRepo": "envoyproxy/envoy", + "reactions": 4, + "comments": 24 + }, + "security": { + "scannedAt": "2026-02-27T17:46:14.425Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/envoy/envoy-16298-jwt-authn-support-fetching-jwks-in-the-background.json b/solutions/cncf-generated/envoy/envoy-16298-jwt-authn-support-fetching-jwks-in-the-background.json new file mode 100644 index 00000000..2c55ad00 --- /dev/null +++ b/solutions/cncf-generated/envoy/envoy-16298-jwt-authn-support-fetching-jwks-in-the-background.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:20.867Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "envoy: jwt_authn: support fetching jwks in the background", + "description": "It is to fix: https://github.com/envoyproxy/envoy/issues/14556#issuecomment-754203164\n\nCurrently, remote Jwks is fetched on-demand, in the worker thread after the requests come. The first few requests need to pause to wait for the Jwks. 
\n\nAdd a new feature to fetch remote Jwks in the main thread, before the listener is activated.\n\nDetail changes:\n* Change the filter config to add async_fetch field inside RemoteJwks message\n* Add a new class: JwksAsyncFetcher class to handle this new config.\n* Add two new statistics counters `jwks_fetch_success` and `jwks_fetch_fail`.\n\nRisk Level: Low since new feature is guarded by the new config.\nTesting: unit-tested and integration tested\nDocs Changes: None\nRelease Notes: Yes", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Can you check CI?\n\n@rojkov let me know once the implementation looks good to you, then I will take a second look.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "ci/run_envoy_docker.sh `ci/do_ci.sh bazel.debug //test/extensions/filters/http/jwt_authn:filter_integration_test`", + "ERROR: /source/source/exe/BUILD:47:17: C++ compilation of rule '//source/exe:envoy_main_entry_lib' failed (Exit 1): gcc failed: error executing command \r\ngcc: fatal error: cannot execute ‘/usr/lib/gcc/x86_64-linux-gnu/9/cc1plus’: execv: Argument list too long" + ] + } + }, + "metadata": { + "tags": [ + "envoy", + "graduated", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "envoy" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/envoyproxy/envoy/pull/16298", + "sourceRepo": "envoyproxy/envoy", + "reactions": 3, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:46:20.867Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/envoy/envoy-31831-xff-add-support-for-configuring-a-list-of-trusted-cidrs.json b/solutions/cncf-generated/envoy/envoy-31831-xff-add-support-for-configuring-a-list-of-trusted-cidrs.json new file mode 100644 index 00000000..2df92f4d --- /dev/null +++ 
b/solutions/cncf-generated/envoy/envoy-31831-xff-add-support-for-configuring-a-list-of-trusted-cidrs.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:06.455Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "envoy: xff: add support for configuring a list of trusted CIDRs", + "description": "Commit Message: xff: add support for configuring a list of trusted CIDRs\n\nThe original client IP address can be determined from the x-forwarded-for header either by a fixed number of trusted hops, or by evaluating the client IP address against a list of trusted addresses.\n\nThis adds support for configuring a list of CIDRs in the xff original IP detection extension. The remote IP address is evaluated against these, and optionally recurses through XFF to find the last non-trusted address.\n\nAdditional Description:\nThis feature is generally used by people with a CDN in front of their edge proxy to ensure that XFF is only parsed when the remote connection comes from a CDN server.\n\nThe behaviour of the new functionality should be the same as Nginx's `realip` module.\n\nDisclaimer: This is my first time writing C++ so I'm not certain my changes are completely idiomatic, but I've tried to stick with existing style in the codebase. 
Feedback very welcome!\n\nRisk Level: Medium\nTesting: Unit tests, m", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Can you merge main?\n\n/wait", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "envoy", + "graduated", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "envoy" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/envoyproxy/envoy/pull/31831", + "sourceRepo": "envoyproxy/envoy", + "reactions": 7, + "comments": 28 + }, + "security": { + "scannedAt": "2026-02-27T17:46:06.455Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/envoy/envoy-32861-proxy-protocol-filter-add-configuration-to-match-only-specific-proxy.json b/solutions/cncf-generated/envoy/envoy-32861-proxy-protocol-filter-add-configuration-to-match-only-specific-proxy.json new file mode 100644 index 00000000..30e20283 --- /dev/null +++ b/solutions/cncf-generated/envoy/envoy-32861-proxy-protocol-filter-add-configuration-to-match-only-specific-proxy.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:16.847Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "envoy: proxy_protocol_filter: Add configuration to match only specific proxy protocol versions, new stats", + "description": "Commit Message:\nproxy_protocol_filter: Configuration to match only specific proxy protocol versions, new stats\n\nAdditional Description:\nCurrently the Proxy Protocol Listener filter will try to match incoming connections against both proxy protocol v1 and v2 signatures. 
While this is convenient, it:\n- Increases the attack surface of the filter.\n- (when `allow_requests_without_proxy_protocol` is enabled) Increases the chance of signature conflicts between proxy protocol v1 requests and non-proxy protocol requests.\n\nThis change adds a new config option `disallowed_versions` that scopes down the set of proxy protocol versions that the filter matches. The configuration is optional and defaults to current behavior when not specified.\n\nThis change also adds new statistics per matched proxy protocol version. See doc update for details.\n\n```\ndownstream_cx_proxy_proto.not_found_disallowed\ndownstream_cx_proxy_proto.not_found_allowed\ndownstream_cx_proxy_proto.v1.found\ndownstream_cx_proxy_proto.v1.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "CC @envoyproxy/api-shepherds: Your approval is needed for changes made to `(api/envoy/|docs/root/api-docs/)`.\nenvoyproxy/api-shepherds assignee is @markdroth\nCC @envoyproxy/api-watchers: FYI only for changes made to `(api/envoy/|docs/root/api-docs/)`.\n\n
\n\t:cat:\n\nCaused by: https://github.com/envoyproxy/envoy/pull/32861 was opened by nareddyt.\n\nsee: [more](https://github.com/envoyproxy/envoy/pull/32861), [trace](https://prod.repokitteh.app/traces/ui/envoyproxy/envoy/0fa545a0-e0c6-11ee-954d-c0906d966594).\n
", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "downstream_cx_proxy_proto.not_found_disallowed\r\ndownstream_cx_proxy_proto.not_found_allowed\r\ndownstream_cx_proxy_proto.v1.found\r\ndownstream_cx_proxy_proto.v1.disallowed\r\ndownstream_cx_proxy_proto.v1.error\r\ndownstream_cx_proxy_proto.v2.found\r\ndownstream_cx_proxy_proto.v2.disallowed\r\ndownstream_cx_proxy_proto.v2.error" + ] + } + }, + "metadata": { + "tags": [ + "envoy", + "graduated", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "envoy" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/envoyproxy/envoy/pull/32861", + "sourceRepo": "envoyproxy/envoy", + "reactions": 4, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:46:16.847Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/envoy/envoy-36369-tls-support-for-ecdsa-p-384-and-p-521-certificates.json b/solutions/cncf-generated/envoy/envoy-36369-tls-support-for-ecdsa-p-384-and-p-521-certificates.json new file mode 100644 index 00000000..b4c83785 --- /dev/null +++ b/solutions/cncf-generated/envoy/envoy-36369-tls-support-for-ecdsa-p-384-and-p-521-certificates.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:15.572Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "envoy: tls: support for ECDSA P-384 and P-521 certificates", + "description": "Commit Message: tls: support for ECDSA P-384 and P-521 certificates (#10855)\n\nAdditional Description: Commercial National Security Algorithm Suite (CNSA) requires ECDSA keys be specified with P-384 curves. The assertion that there are [no security benefits to curves higher than P-256](https://github.com/envoyproxy/envoy/pull/5224#issue-387770091) is no longer true. 
This change is intended to limit the adoptable curves to P-384 and P-521.\n\nRisk Level: Medium - removal of limitation of curves to be used for ECDSA certificates, with [potential misconfiguration and DoS risks](https://github.com/envoyproxy/envoy/issues/10855#issuecomment-618023133) mentioned in previous discourse on the issue. This risk is mitigated in this PR, however, by continuing to expressly limit the type of EC keys accepted to those associated with the P-256, P-384 or P-521 curves and no others.\n\nTesting: Testing using unit and integration tests\n\nRan build envoy artefact locally with below config:\n\n```\n---\nadmin:\n a", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "> @ggreenway Do you recall why only P-256 curve as supported initially? I think it's the only FIPS one back in the days, was it because of that?\n\nhttps://github.com/envoyproxy/envoy/issues/10855#issuecomment-618023133 explains it", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "---\r\nadmin:\r\n address:\r\n socket_address:\r\n address: 127.0.0.1\r\n port_value: 9901\r\nstatic_resources:\r\n listeners:\r\n - name: listener_0\r\n address:\r\n socket_address:\r\n address: 127.0.0.1\r\n port_value: 10000\r\n filter_chains:\r\n - filters:\r\n - name: envoy.filters.network.http_connection_manager\r\n typed_config:\r\n \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\r\n stat_prefix: ingress_http\r\n codec_type: AUTO\r\n route_config:\r\n name: local_route\r\n virtual_hosts:\r\n - name: local_service\r\n domains:\r\n - \"*\"\r\n routes:\r\n - match:\r\n prefix: /\r\n route:\r\n cluster: some_service\r\n http_filters:\r\n - name: envoy.filters.http.router\r\n typed_config:\r\n \"@type\": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\r\n transport_socket:\r\n name: 
envoy.transport_sockets.tls\r\n typed_config:\r\n \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext\r\n common_tls_context:\r\n tls_certificates:\r\n - certificate_chain: {filename: \"test/common/tls/test_data/selfsigned_ecdsa_p384_cert.pem\"}\r\n private_key: {filename: \"test/common/tls/test_data/selfsigned_ecdsa_p384_key.pem\"}\r\n clusters:\r\n - name: some_service\r\n connect_timeout: 0.25s\r\n type: STATIC\r\n lb_policy: ROUND_ROBIN\r\n load_assignment:\r\n cluster_name: some_service\r\n endpoints:\r\n - lb_endpoints:\r\n - endpoint:\r\n address:\r\n socket_address:\r\n address: 127.0.0.1\r\n port_value: 1234", + "Connecting to 127.0.0.1\r\nCONNECTED(00000003)\r\nCan't use SSL_get_servername\r\ndepth=0 C=US, ST=California, L=San Francisco, O=Lyft, OU=Lyft Engineering, CN=Test Server\r\nverify error:num=18:self-signed certificate\r\nverify return:1\r\ndepth=0 C=US, ST=California, L=San Francisco, O=Lyft, OU=Lyft Engineering, CN=Test Server\r\nverify return:1\r\n---\r\nCertificate chain\r\n 0 s:C=US, ST=California, L=San Francisco, O=Lyft, OU=Lyft Engineering, CN=Test Server\r\n i:C=US, ST=California, L=San Francisco, O=Lyft, OU=Lyft Engineering, CN=Test Server\r\n a:PKEY: id-ecPublicKey, 384 (bit); sigalg: ecdsa-with-SHA256\r\n v:NotBefore: Aug 21 19:14:10 2024 GMT; NotAfter: Aug 21 19:14:10 2026 GMT\r\n---\r\nServer certificate\r\n-----BEGIN 
CERTIFICATE-----\r\nMIIC0jCCAlegAwIBAgIUUv13YuIFYMJxp1t4z8Z7H0cFdHowCgYIKoZIzj0EAwIw\r\nejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDVNh\r\nbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5naW5l\r\nZXJpbmcxFDASBgNVBAMMC1Rlc3QgU2VydmVyMB4XDTI0MDgyMTE5MTQxMFoXDTI2\r\nMDgyMTE5MTQxMFowejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWEx\r\nFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsM\r\nEEx5ZnQgRW5naW5lZXJpbmcxFDASBgNVBAMMC1Rlc3QgU2VydmVyMHYwEAYHKoZI\r\nzj0CAQYFK4EEACIDYgAEtFQWaGrCFUT70YVGv9IA0H1d/fUGdoATjqAQlgOnzWf4\r\nFcJIqRQ8dGJ0wom/p8b/3MrKpy8wpWBnAo2C9+9owGdOqcqSIFLVV0iaGogKhIAx\r\n7KAjWoMEpal4uNnaYLlCo4GdMIGaMAwGA1UdEwEB/wQCMAAwCwYDVR0PBAQDAgXg\r\nMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATAeBgNVHREEFzAVghNzZXJ2\r\nZXIxLmV4YW1wbGUuY29tMB0GA1UdDgQWBBQ23kFgk8ELq1P0xW3R8SYRwJRcyjAf\r\nBgNVHSMEGDAWgBQ23kFgk8ELq1P0xW3R8SYRwJRcyjAKBggqhkjOPQQDAgNpADBm\r\nAjEA6FC5eEaKcV7i9AUuVsIJruDKqLVmSLKzHX+DVxOvaxQcTuKMwtg8AuTq1qq+\r\nMZ8EAjEA3JKxxjQAp2hi2gvSUGXQqk3seETImDNmUdWXmYcohDRM36KKJORqXoui\r\njD+/8ipt\r\n-----END CERTIFICATE-----\r\nsubject=C=US, ST=California, L=San Francisco, O=Lyft, OU=Lyft Engineering, CN=Test Server\r\nissuer=C=US, ST=California, L=San Francisco, O=Lyft, OU=Lyft Engineering, CN=Test Server\r\n---\r\nNo client certificate CA names sent\r\nPeer signing digest: SHA384\r\nPeer signature type: ECDSA\r\nServer Temp Key: X25519, 253 bits\r\n---\r\nSSL handshake has read 1062 bytes and written 379 bytes\r\nVerification error: self-signed certificate\r\n---\r\nNew, TLSv1.3, Cipher is TLS_AES_256_GCM_SHA384\r\nServer public key is 384 bit\r\nThis TLS version forbids renegotiation.\r\nCompression: NONE\r\nExpansion: NONE\r\nNo ALPN negotiated\r\nEarly data was not sent\r\nVerify return code: 18 (self-signed certificate)\r\n---" + ] + } + }, + "metadata": { + "tags": [ + "envoy", + "graduated", + "networking", + "deps" + ], + "category": "networking", + "cncfProjects": [ + "envoy" + ], + "targetResourceKinds": [ + 
"Service", + "Ingress" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/envoyproxy/envoy/pull/36369", + "sourceRepo": "envoyproxy/envoy", + "reactions": 4, + "comments": 23 + }, + "security": { + "scannedAt": "2026-02-27T17:46:15.572Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/envoy/envoy-36623-localratelimit-http-add-dynamic-token-bucket-support.json b/solutions/cncf-generated/envoy/envoy-36623-localratelimit-http-add-dynamic-token-bucket-support.json new file mode 100644 index 00000000..66c14627 --- /dev/null +++ b/solutions/cncf-generated/envoy/envoy-36623-localratelimit-http-add-dynamic-token-bucket-support.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:08.698Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "envoy: LocalRateLimit(HTTP): Add dynamic token bucket support", + "description": "Commit Message: LocalRateLimit(HTTP): Add dynamic token bucket support\nAdditional Description:\nfixes: https://github.com/envoyproxy/envoy/issues/23351 and https://github.com/envoyproxy/envoy/issues/19895\n\nUser configures [descriptors](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/common/ratelimit/v3/ratelimit.proto#envoy-v3-api-msg-extensions-common-ratelimit-v3-localratelimitdescriptor) in the [http local rate limit filter](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto). These descriptors are the \"target\" to match using the source descriptors built using the traffic(http requests). Only matched traffic will be rate limited. 
When request comes, at runtime, based on [rate_limit](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto#envoy-v3-api-msg-config-route-v3-ratelimit) configuration, descriptors are generated where `values` are picked from the request as direc", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "I think we still need a way to limit the overhead and memory of the token buckets. It's unacceptable to let it increases unlimited.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "envoy", + "graduated", + "networking", + "no-stalebot" + ], + "category": "networking", + "cncfProjects": [ + "envoy" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/envoyproxy/envoy/pull/36623", + "sourceRepo": "envoyproxy/envoy", + "reactions": 5, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:46:08.698Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/envoy/envoy-37642-http-configurable-ignore-of-http-1-1-upgrades.json b/solutions/cncf-generated/envoy/envoy-37642-http-configurable-ignore-of-http-1-1-upgrades.json new file mode 100644 index 00000000..cb992701 --- /dev/null +++ b/solutions/cncf-generated/envoy/envoy-37642-http-configurable-ignore-of-http-1-1-upgrades.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:03.167Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "envoy: http: configurable ignore of HTTP/1.1 upgrades", + "description": "Fixes https://github.com/envoyproxy/envoy/issues/36305\n\nAdd configuration to ignore HTTP/1.1 Upgrade headers . 
See https://datatracker.ietf.org/doc/html/rfc7230#section-6.7:\n\nRisk Level: Medium\nTesting: new unit tests\nDocs Changes: in with APIs\nRelease Notes: inline", + "type": "upgrade", + "status": "completed", + "resolution": { + "summary": "note: envoy maintainers will be mostly on vacation until 6th jan\n\n/wait-any", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Connection: Upgrade\r\nUpgrade: TLS/1.3", + "Connection: Upgrade\r\nUpgrade: TLS/1.2", + "Connection: Upgrade\r\nUpgrade: TLS/1.2, TLS/1.3" + ] + } + }, + "metadata": { + "tags": [ + "envoy", + "graduated", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "envoy" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/envoyproxy/envoy/pull/37642", + "sourceRepo": "envoyproxy/envoy", + "reactions": 23, + "comments": 32 + }, + "security": { + "scannedAt": "2026-02-27T17:46:03.167Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/envoy/envoy-38550-cache-redis-backend.json b/solutions/cncf-generated/envoy/envoy-38550-cache-redis-backend.json new file mode 100644 index 00000000..6a1db1d6 --- /dev/null +++ b/solutions/cncf-generated/envoy/envoy-38550-cache-redis-backend.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:21.739Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "envoy: cache: redis backend", + "description": "Commit Message:\n\nAdditional Description:\nSee https://github.com/cpakulski/envoy/blob/issue/27154/source/extensions/http/cache/redis_http_cache/DESIGN.md for more info.\nRisk Level: Low\nTesting: TBD\nDocs Changes: TBD\nRelease Notes: TBD\nPlatform Specific Features:\nFixes #27154", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "As a 
reminder, PRs marked as draft will not be automatically assigned reviewers,\nor be handled by maintainer-oncall triage.\n\nPlease mark your PR as ready when you want it to be reviewed!\n\n
\n\t:cat:\n\nCaused by: https://github.com/envoyproxy/envoy/pull/38550 was opened by cpakulski.\n\nsee: [more](https://github.com/envoyproxy/envoy/pull/38550), [trace](https://prod.repokitteh.app/traces/ui/envoyproxy/envoy/f36ce730-f2f3-11ef-9319-6ec6640ccf9e).\n
", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "envoy", + "graduated", + "networking", + "stale", + "api" + ], + "category": "networking", + "cncfProjects": [ + "envoy" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/envoyproxy/envoy/pull/38550", + "sourceRepo": "envoyproxy/envoy", + "reactions": 3, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:46:21.739Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/envoy/envoy-40653-contrib-implement-peak-ewma-load-balancing-policy.json b/solutions/cncf-generated/envoy/envoy-40653-contrib-implement-peak-ewma-load-balancing-policy.json new file mode 100644 index 00000000..aa0cd5fb --- /dev/null +++ b/solutions/cncf-generated/envoy/envoy-40653-contrib-implement-peak-ewma-load-balancing-policy.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:18.030Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "envoy: contrib: implement Peak EWMA load balancing policy", + "description": "# Commit Message\n\nAdds Peak EWMA (Exponentially Weighted Moving Average) load balancing policy that uses Power of Two Choices algorithm with real-time RTT measurements for latency-aware request routing.\n\nKey components:\n- Load balancer: `envoy.load_balancing_policies.peak_ewma`\n- HTTP filter: `envoy.filters.http.peak_ewma for RTT measurement`\n- Configuration: `decay_time`, `aggregation_interval`, `max_samples_per_host`, `default_rtt`, `penalty_value`\n\nImplementation uses lock-free atomic ring buffers for RTT sample collection and host-attached storage pattern. 
Draws from Finagle's Peak EWMA algorithm while avoiding locks, and patterns after Envoy's existing client-side WRR load balancing implementation for main/worker thread coordination.\n\nFixes #20907\n\n# Additional Description\n\nThis PR implements a new contrib load balancing policy based on the Peak EWMA (Exponentially Weighted Moving Average) algorithm, which provides latency-aware request routing using real-time RTT measurements.\n\nT", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@rroblak CI won't run because of the DCO check failing. You can follow the [instructions here](https://github.com/envoyproxy/envoy/pull/40653/checks?check_run_id=47725816845) to fix it and let the baseline tests run.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "envoy", + "graduated", + "networking", + "contrib" + ], + "category": "networking", + "cncfProjects": [ + "envoy" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/envoyproxy/envoy/pull/40653", + "sourceRepo": "envoyproxy/envoy", + "reactions": 4, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:46:18.030Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/envoy/envoy-560-allow-healthchecks-to-be-performed-on-another-port.json b/solutions/cncf-generated/envoy/envoy-560-allow-healthchecks-to-be-performed-on-another-port.json new file mode 100644 index 00000000..ffc47dbc --- /dev/null +++ b/solutions/cncf-generated/envoy/envoy-560-allow-healthchecks-to-be-performed-on-another-port.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:10.102Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "envoy: Allow healthchecks to be performed on another port", + 
"description": "Please find a pull request to allow one to configure a specific port for the health checks.\n\nfixes https://github.com/lyft/envoy/issues/439", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This provides an alternative health check port, if set it allows an\nupstream host to have different health check address port.\n\nRef: https://github.com/envoyproxy/envoy/issues/439\n\nSigned-off-by: Dhi Aurrahman ", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "envoy", + "graduated", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "envoy" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/envoyproxy/envoy/pull/560", + "sourceRepo": "envoyproxy/envoy", + "reactions": 4, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:46:10.102Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/envoy/envoy-6379-wip-dns-add-support-for-srv-records-in-dns-lookup.json b/solutions/cncf-generated/envoy/envoy-6379-wip-dns-add-support-for-srv-records-in-dns-lookup.json new file mode 100644 index 00000000..75b31509 --- /dev/null +++ b/solutions/cncf-generated/envoy/envoy-6379-wip-dns-add-support-for-srv-records-in-dns-lookup.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:07.661Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "envoy: [WIP] dns: add support for SRV records in DNS lookup", + "description": "Description: This adds support for SRV records in DNS lookup by introducing a new `SrvInstance` type which holds a regular `Address::Instance` object ~along with `priority` and `weight` information~.\nRisk Level: Med\nTesting: Pending\nDocs Changes: Pending\nRelease Notes: Pending\nFixes 
#125\nRelated #517\n\nThis PR is currently a WIP. Wiring, configuration, tests, documentation, etc. will be added once the implementation looks okay.\n\n/cc @mattklein123 @htuch", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "WIP", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "envoy", + "graduated", + "networking", + "stale", + "waiting" + ], + "category": "networking", + "cncfProjects": [ + "envoy" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/envoyproxy/envoy/pull/6379", + "sourceRepo": "envoyproxy/envoy", + "reactions": 6, + "comments": 23 + }, + "security": { + "scannedAt": "2026-02-27T17:46:07.661Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/eraser/eraser-191-helmify.json b/solutions/cncf-generated/eraser/eraser-191-helmify.json new file mode 100644 index 00000000..4aa67659 --- /dev/null +++ b/solutions/cncf-generated/eraser/eraser-191-helmify.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:07.920Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "eraser: Helmify", + "description": "Generate Helm chart automatically\n\nFixes #77", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "Helm chart to avoid installation via static YAML file\n\nfixes #77", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "{\"level\":\"error\",\"ts\":1652934541.6843162,\"logger\":\"controller.imagejob-controller\",\"msg\":\"Reconciler error\",\"name\":\"imagejob-lznmt\",\"namespace\":\"\",\"error\":\"reconcile new: ImageList.eraser.sh \\\"\\\" not 
found\",\"stacktrace\":\"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\\n\\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.11.1/pkg/internal/controller/controller.go:266\\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\\n\\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.11.1/pkg/internal/controller/controller.go:227\"}" + ] + } + }, + "metadata": { + "tags": [ + "eraser", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "eraser" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/eraser-dev/eraser/pull/191", + "sourceRepo": "eraser-dev/eraser", + "reactions": 1, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:48:07.920Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/etcd/etcd-11375-etcdserver-fix-watch-metrics.json b/solutions/cncf-generated/etcd/etcd-11375-etcdserver-fix-watch-metrics.json new file mode 100644 index 00000000..a7301a4d --- /dev/null +++ b/solutions/cncf-generated/etcd/etcd-11375-etcdserver-fix-watch-metrics.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:39.165Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "etcd: etcdserver: fix watch metrics", + "description": "Currently, when a client closes context during watch we pass. `codes.Unavailable` to `status.New()` via `rpctypes.ErrGRPCNoLeader`[1],[2] this inadvertently registers `Unavailable` in Prometheus metrics which causes an issue as `Unavailable` indicates the service is currently unavailable [3]. This PR changes the logic for how we conclude the leader is lost by observing `RaftStatusGetter.Leader()`[4] for `raft.None`. 
Only then do we return Unavailable (no leader) otherwise Canceled.\n\nFixes #10289 #9725 #9576 #9166\n\n[1] https://github.com/etcd-io/etcd/pull/11375/files#diff-8a4ebdea7c0a8a8926fca73c3058b0b9L200\n[2] - https://github.com/etcd-io/etcd/blob/0fb26df249f1cd4982c49ef125a3b313dfbde7d6/etcdserver/api/v3rpc/rpctypes/error.go#L68\n[3] https://github.com/grpc/grpc-go/blob/master/codes/codes.go#L140\n[4] - https://github.com/etcd-io/etcd/blob/bbe1e78e6242a57d54c4b96d8c49ea1e094c3cbb/etcdserver/server.go#L1907", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Before this patch, a client which cancels the context for a watch results in the\nserver generating a `rpctypes.ErrGRPCNoLeader` error that leads the recording of\na gRPC `Unavailable` metric in association with the client watch cancellation.\nThe metric looks like this:\n\n grpc_server_handled_total{grpc_code=\"Unavailable\",grpc_method=\"Watch\",grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"}\n\nSo, the watch server has misidentified the error as a server error and then\npropagates the mistake to metrics, leading to a false indicator that the leader\nhas been lost. This false signal then leads to false alerting.\n\nThe commit 9c103dd0dedfc723cd4f33b6a5e81343d8a6bae7 introduced an interceptor which wraps\nwatch streams requiring a leader, causing those streams to be actively canceled\nwhen leader loss is detected.\n\nHowever, the error handling code assumes all stream context cancellations are\nfrom the interceptor. This assumption is broken when the context was canceled\nbecause of a client stream cancelation.\n\nThe core challenge is lack of information conveyed via `context.Context` which\nis shared by both the send and receive sides of the stream handling and is\nsubject to cancellation by all paths (including the gRPC library itself). 
If any\npiece of the system cancels the shared context, there's no way for a context\nconsumer to understand who cancelled the context or why.\n\nTo solve the ambiguity of the stream interceptor code specifically, this patch\nintroduces a custom context s", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "| [Impacted Files](https://codecov.io/gh/etcd-io/etcd/pull/11375?src=pr&el=tree) | Coverage Δ | |\n|---|---|---|\n| [etcdserver/api/v3rpc/rpctypes/error.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-ZXRjZHNlcnZlci9hcGkvdjNycGMvcnBjdHlwZXMvZXJyb3IuZ28=) | `90.47% <ø> (ø)` | :arrow_up: |\n| [etcdserver/api/v3rpc/watch.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-ZXRjZHNlcnZlci9hcGkvdjNycGMvd2F0Y2guZ28=) | `80.06% <100%> (+1.63%)` | :arrow_up: |\n| [etcdserver/api/v3rpc/lease.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-ZXRjZHNlcnZlci9hcGkvdjNycGMvbGVhc2UuZ28=) | `67.04% <0%> (-7.96%)` | :arrow_down: |\n| [auth/store.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-YXV0aC9zdG9yZS5nbw==) | `44.82% <0%> (-2.56%)` | :arrow_down: |\n| [lease/leasehttp/http.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-bGVhc2UvbGVhc2VodHRwL2h0dHAuZ28=) | `64.23% <0%> (-1.46%)` | :arrow_down: |\n| [etcdserver/api/v2http/client.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-ZXRjZHNlcnZlci9hcGkvdjJodHRwL2NsaWVudC5nbw==) | `84.3% <0%> (-1.21%)` | :arrow_down: |\n| [pkg/proxy/server.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-cGtnL3Byb3h5L3NlcnZlci5nbw==) | `60.2% <0%> (-1.02%)` | :arrow_down: |\n| [etcdserver/v3\\_server.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-ZXRjZHNlcnZlci92M19zZXJ2ZXIuZ28=) | `72.86% <0%> (-0.86%)` | :arrow_down: |\n| 
[mvcc/watchable\\_store.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-bXZjYy93YXRjaGFibGVfc3RvcmUuZ28=) | `82.51% <0%> (-0.7%)` | :arrow_down: |\n| [mvcc/metrics\\_txn.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-bXZjYy9tZXRyaWNzX3R4bi5nbw==) | `100% <0%> (ø)` | :arrow_up: |\n| ... and [20 more](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree-more) | |\n\n------\n\n[Continue to review full report at Codecov](https://codecov.io/gh/etcd-io/etcd/pull/11375?src=pr&el=continue).\n> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)\n> `Δ = absolute (impact)`, `ø = not affected`, `? = missing data`\n> Powered by [Codecov](https://codecov.io/gh/etcd-io/etcd/pull/11375?src=pr&el=footer). Last update [ec52217...91042e2](https://codecov.io/gh/etcd-io/etcd/pull/11375?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).\n\n/cc @brancz \n/cc @jingyih @gyuho PTAL\nI have very little idea about the code changes, they look fine to me but I really don’t know the code very well. If it does what’s promised then I’m extremely excited to finally turn the alerts back on! :)\n@gyuho would you mind taking a peek please:).\n@xiang90 would you mind looking please.\nHi guys, any update on this? Thanks in advance!\nbump @hexfusion. 
There are TODOs on this PR from @xiang90’s feedback.\nI hope to get back to this soon.\n# [Codecov](https://codecov.io/gh/etcd-io/etcd/pull/11375?src=pr&el=h1) Report\n> Merging [#11375](https://codecov.io/gh/etcd-io/etcd/pull/11375?src=pr&el=desc) into [master](https://codecov.io/gh/etcd-io/etcd/commit/63dd73c1869f1784f907b922f61571176a2802e8&el=desc) will **decrease** coverage by `0.64%`.\n> The diff coverage is `100.00%`.\n\n[![Impacted file tree graph](https://codecov.io/gh/etcd-io/etcd/pull/11375/graphs/tree.svg?width=650&height=150&src=pr&token=so7nNovJo3)](https://codecov.io/gh/etcd-io/etcd/pull/11375?src=pr&el=tree)" + ] + } + }, + "metadata": { + "tags": [ + "etcd", + "graduated", + "orchestration", + "backport-v3-4" + ], + "category": "troubleshooting", + "cncfProjects": [ + "etcd" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/etcd-io/etcd/pull/11375", + "sourceRepo": "etcd-io/etcd", + "reactions": 3, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:46:39.166Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/etcd/etcd-11564-clientv3-fix-grpc-go-v1-27-0-incompatible-changes-to-balancer-resolve.json b/solutions/cncf-generated/etcd/etcd-11564-clientv3-fix-grpc-go-v1-27-0-incompatible-changes-to-balancer-resolve.json new file mode 100644 index 00000000..5057ce61 --- /dev/null +++ b/solutions/cncf-generated/etcd/etcd-11564-clientv3-fix-grpc-go-v1-27-0-incompatible-changes-to-balancer-resolve.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:24.938Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "etcd: clientv3: Fix grpc-go(v1.27.0) incompatible changes to balancer/resolver.", + "description": "clientv3: Fix grpc-go (v1.27.0) incompatible modification of balancer/resolver API.\n\nModify the API changed 
by balancer / resolver to ensure consistency with grpc-go (v1.27.0), otherwise clientv3 will not be able to be pulled by go mod, which will affect the direct use of users.\n\nReferences:\n1. [balancer/resolver: remove temporary backward-compatibility type aliases](https://github.com/grpc/grpc-go/pull/3309)\n2. [Notice: Upcoming Experimental Balancer/Resolver API Changes](https://github.com/grpc/grpc-go/issues/3180)\n\nFixes https://github.com/etcd-io/etcd/issues/11563", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "- reva configuration changed a bit,\n - [x] adjust config for shared jwt token\n - [x] add new config options\n - [x] layout\n - [ ] ~~others?~~\n- [x] login work\n- [x] file upload is broken\n- [x] the root storage may no longer be necessary\n - [ ] only affects ocis\n- [x] update docs\n- [x] add changelog\n- [x] uses grpc-go (v1.27.0), so we will run into the same problem as https://github.com/etcd-io/etcd/issues/11563\n - maybe it will get fixed in etcd by the time we try to use this in ocis: https://github.com/etcd-io/etcd/pull/11564\n - can be worked around by putting `replace google.golang.org/grpc => google.golang.org/grpc v1.26.0` into ocis go.mod", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "| [Impacted Files](https://codecov.io/gh/etcd-io/etcd/pull/11564?src=pr&el=tree) | Coverage Δ | |\n|---|---|---|\n| [clientv3/balancer/resolver/endpoint/endpoint.go](https://codecov.io/gh/etcd-io/etcd/pull/11564/diff?src=pr&el=tree#diff-Y2xpZW50djMvYmFsYW5jZXIvcmVzb2x2ZXIvZW5kcG9pbnQvZW5kcG9pbnQuZ28=) | `84.48% <100%> (+1.87%)` | :arrow_up: |\n| [clientv3/balancer/picker/err.go](https://codecov.io/gh/etcd-io/etcd/pull/11564/diff?src=pr&el=tree#diff-Y2xpZW50djMvYmFsYW5jZXIvcGlja2VyL2Vyci5nbw==) | `100% <100%> (ø)` | :arrow_up: |\n| 
[clientv3/balancer/picker/roundrobin\\_balanced.go](https://codecov.io/gh/etcd-io/etcd/pull/11564/diff?src=pr&el=tree#diff-Y2xpZW50djMvYmFsYW5jZXIvcGlja2VyL3JvdW5kcm9iaW5fYmFsYW5jZWQuZ28=) | `100% <100%> (ø)` | :arrow_up: |\n| [auth/options.go](https://codecov.io/gh/etcd-io/etcd/pull/11564/diff?src=pr&el=tree#diff-YXV0aC9vcHRpb25zLmdv) | `35% <0%> (-57.5%)` | :arrow_down: |\n| [clientv3/balancer/utils.go](https://codecov.io/gh/etcd-io/etcd/pull/11564/diff?src=pr&el=tree#diff-Y2xpZW50djMvYmFsYW5jZXIvdXRpbHMuZ28=) | `53.84% <0%> (-46.16%)` | :arrow_down: |\n| [client/client.go](https://codecov.io/gh/etcd-io/etcd/pull/11564/diff?src=pr&el=tree#diff-Y2xpZW50L2NsaWVudC5nbw==) | `47.71% <0%> (-36.28%)` | :arrow_down: |\n| [pkg/transport/timeout\\_conn.go](https://codecov.io/gh/etcd-io/etcd/pull/11564/diff?src=pr&el=tree#diff-cGtnL3RyYW5zcG9ydC90aW1lb3V0X2Nvbm4uZ28=) | `60% <0%> (-20%)` | :arrow_down: |\n| [auth/jwt.go](https://codecov.io/gh/etcd-io/etcd/pull/11564/diff?src=pr&el=tree#diff-YXV0aC9qd3QuZ28=) | `51.68% <0%> (-16.89%)` | :arrow_down: |\n| [etcdserver/api/v3rpc/util.go](https://codecov.io/gh/etcd-io/etcd/pull/11564/diff?src=pr&el=tree#diff-ZXRjZHNlcnZlci9hcGkvdjNycGMvdXRpbC5nbw==) | `51.61% <0%> (-16.13%)` | :arrow_down: |\n| [etcdserver/api/membership/store.go](https://codecov.io/gh/etcd-io/etcd/pull/11564/diff?src=pr&el=tree#diff-ZXRjZHNlcnZlci9hcGkvbWVtYmVyc2hpcC9zdG9yZS5nbw==) | `60.68% <0%> (-15.67%)` | :arrow_down: |\n| ... and [121 more](https://codecov.io/gh/etcd-io/etcd/pull/11564/diff?src=pr&el=tree-more) | |\n\n------\n\n[Continue to review full report at Codecov](https://codecov.io/gh/etcd-io/etcd/pull/11564?src=pr&el=continue).\n> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)\n> `Δ = absolute (impact)`, `ø = not affected`, `? = missing data`\n> Powered by [Codecov](https://codecov.io/gh/etcd-io/etcd/pull/11564?src=pr&el=footer). 
Last update [3898452...4258cdd](https://codecov.io/gh/etcd-io/etcd/pull/11564?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).\n\ncc @jpbetz @gyuho\n@jpbetz @gyuho @jingyih ping review.\nThanks @eddycjy. Would you also update the changelog (3.5 since this is on master) with a `clientv3` line item about this change? Please include which grpc versions are compatible and which are incompatible (they're overlapping ranges since there was a period of backward compatibility via the typerefs).\n@jpbetz Thanks for your reply and reminder, I have submitted [CHANGELOG-3.5](https://github.com/etcd-io/etcd/pull/11637).\nNext time lets include the changelog update in the same PR as the main change. But I'm okay with separate for this issue since they are both already open.\r\n\r\nLGTM\n@gyuho WDYT?\nI get the following error when I simply try to `go build main.go` in a program which uses `\"go.etcd.io/etcd/clientv3\"`.", + "Will this PR solve the issue? \nAny ETA on merging this? We are hitting the go mod dependency issue.\nAny ETA on releasing 3.3.19? We are also hitting the go mod dependency issue. \nWhen this merge will be released? We are also facing the same issue.\r\n\r\nRegards,\r\nSwathin\nNot releasing this really screws projects that have dependencies with protobufs that have been generated by `github.com/golang/protobuf/protoc-gen-go v1.3.5` or later. 
As the only way to get etcd co-exist with these dependencies is to directly import [this commit](https://github.com/etcd-io/etcd/commit/221f0cc107cb3497eeb20fb241e1bcafca2e9115) via", + "Which then translates into" + ] + } + }, + "metadata": { + "tags": [ + "etcd", + "graduated", + "orchestration" + ], + "category": "troubleshooting", + "cncfProjects": [ + "etcd" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/etcd-io/etcd/pull/11564", + "sourceRepo": "etcd-io/etcd", + "reactions": 24, + "comments": 17 + }, + "security": { + "scannedAt": "2026-02-27T17:46:24.938Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/etcd/etcd-11580-fix-issues-with-updated-structure-names-in-grpc-go.json b/solutions/cncf-generated/etcd/etcd-11580-fix-issues-with-updated-structure-names-in-grpc-go.json new file mode 100644 index 00000000..574ee008 --- /dev/null +++ b/solutions/cncf-generated/etcd/etcd-11580-fix-issues-with-updated-structure-names-in-grpc-go.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:28.237Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "etcd: Fix issues with updated structure names in grpc-go.", + "description": "Fixes #11563", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.26.0 to 1.27.0.\n
\nRelease notes\n

Sourced from google.golang.org/grpc's releases.

\n
\n

Release 1.27.0

\n

API Changes

\n
    \n
  • balancer/resolver: remove temporary backward-compatibility type aliases (#3309)
  • \n
\n

Behavior Changes

\n
    \n
  • dns: ignore TXT errors unless GRPC_GO_IGNORE_TXT_ERRORS=false (#3299)
  • \n
\n

New Features

\n
    \n
  • client: add interface for ClientConn to be accepted by generated code (#3334)
  • \n
  • client: add WithResolvers option for specifying client-local resolvers (#3320)
  • \n
  • advancedtls: add new module for advanced TLS handshaker (#3187)
  • \n
  • credentials: create API for transport security level information (#3214)
  • \n
\n

Bug Fixes

\n
\n
:rocket: New features to boost your workflow: \n\n- :snowflake: [Test Analytics](https://docs.codecov.com/docs/test-analytics): Detect flaky tests, report on failures, and find test suite problems.\n
\n@xUser5000 can you please resolve the comments? thx\npls squash the commits\n@ahrtr should I fix the tests first or just ignore them?\n/retest\nThank you!\n[APPROVALNOTIFIER] This PR is **APPROVED**\n\nThis pull-request has been approved by: *
ahrtr*, *fuweid*, *xUser5000*\n\nThe full list of commands accepted by this bot can be found [here](https://go.k8s.io/bot-commands?repo=etcd-io%2Fetcd).\n\nThe pull request process is described [here](https://git.k8s.io/community/contributors/guide/owners.md#the-code-review-process)\n\n
\nNeeds approval from an approver in each of these files:\n\n- ~~[OWNERS](https://github.com/etcd-io/etcd/blob/main/OWNERS)~~ [ahrtr,fuweid]\n\nApprovers can indicate their approval by writing `/approve` in a comment\nApprovers can cancel approval by writing `/approve cancel` in a comment\n
\n\n/cherry-pick release-3.6\n@fuweid: new pull request created: #20874\n\n
\n\nIn response to [this](https://github.com/etcd-io/etcd/pull/20792#issuecomment-3470927980):\n\n>/cherry-pick release-3.6\n\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes-sigs/prow](https://github.com/kubernetes-sigs/prow/issues/new?title=Prow%20issue:) repository.\n
\n/cherry-pick release-3.5\n@ahrtr: #20792 failed to apply on top of branch \"release-3.5\":" + ] + } + }, + "metadata": { + "tags": [ + "etcd", + "graduated", + "orchestration", + "area-testing", + "backport-v3-5", + "ok-to-test", + "size-m", + "approved", + "backport-v3-6" + ], + "category": "troubleshooting", + "cncfProjects": [ + "etcd" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/etcd-io/etcd/pull/20792", + "sourceRepo": "etcd-io/etcd", + "reactions": 7, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:46:33.630Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/external-secrets/external-secrets-1537-reuse-vault-client-token-when-possible.json b/solutions/cncf-generated/external-secrets/external-secrets-1537-reuse-vault-client-token-when-possible.json new file mode 100644 index 00000000..777503ae --- /dev/null +++ b/solutions/cncf-generated/external-secrets/external-secrets-1537-reuse-vault-client-token-when-possible.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:21.877Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "external-secrets: Reuse vault client/token when possible", + "description": "This resolves #1273 by caching Vault clients and re-using existing tokens as long as they're valid. The approach is very similar to #1244, which implements something similar for the AWS provider. As in that PR, this new feature is gated behind a CLI flag: `--experimental-enable-vault-token-cache`. The only significant difference from the AWS implementation is that the cache key uses the SecretStore namespace rather than the secret namespace.\n\nAlso note that the token revocation implemented in #381 is disabled when token caching is enabled.I _think_ this is the right thing to do. 
We're now solving the underlying \"lease leak\" problem by not creating multiple token leases in the first place, which removes the need to clean them up.\n\nOther minor changes:\n* Fixed a couple of mis-named functions (`setSecretKeyToken` and `setAppRoleToken` function names were swapped from what they actually did)\n* Added some more mocking that was needed to get tests passing\n* Added logging when tokens are requ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "PRs from forked repos can not publish images, hence this scan fails.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Summarizing 6 Failures:\r\n [FAIL] [vault] sync secrets [It] [common] should find secrets by name using .DataFrom[] with cert auth [vault]\r\n /home/runner/go/pkg/mod/github.com/external-secrets/external-secrets@v0.5.8/e2e/framework/testcase.go:96\r\n [FAIL] [vault] sync secrets [It] [common] should sync json secrets with template with cert auth [vault]\r\n /home/runner/go/pkg/mod/github.com/external-secrets/external-secrets@v0.5.8/e2e/framework/testcase.go:96\r\n [FAIL] [vault] sync secrets [It] [common] should sync secrets with dataFrom with cert auth [vault]\r\n /home/runner/go/pkg/mod/github.com/external-secrets/external-secrets@v0.5.8/e2e/framework/testcase.go:96\r\n [FAIL] [vault] sync secrets [It] [common] should sync multiple secrets from .Data[] with cert auth [vault]\r\n /home/runner/go/pkg/mod/github.com/external-secrets/external-secrets@v0.5.8/e2e/framework/testcase.go:96\r\n [FAIL] [vault] sync secrets [It] [common] should sync docker configurated json secrets with template with cert auth [vault]\r\n /home/runner/go/pkg/mod/github.com/external-secrets/external-secrets@v0.5.8/e2e/framework/testcase.go:96\r\n [FAIL] [vault] sync secrets [It] [common] should sync with empty target name, using json. 
with cert auth [vault]\r\n /home/runner/go/pkg/mod/github.com/external-secrets/external-secrets@v0.5.8/e2e/framework/testcase.go:96\r\n\r\nRan 124 of 217 Specs in 1071.694 seconds\r\nFAIL! -- 118 Passed | 6 Failed | 1 Flaked | 0 Pending | 93 Skipped", + "$ make test TEST_SUITES=\"provider\" VERSION=e2e IMAGE_REGISTRY=local GINKGO_LABELS='vault'" + ] + } + }, + "metadata": { + "tags": [ + "external-secrets", + "sandbox", + "security" + ], + "category": "security", + "cncfProjects": [ + "external-secrets" + ], + "targetResourceKinds": [ + "Secret", + "Namespace", + "Role" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/external-secrets/external-secrets/pull/1537", + "sourceRepo": "external-secrets/external-secrets", + "reactions": 3, + "comments": 16 + }, + "security": { + "scannedAt": "2026-02-27T17:48:21.877Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/external-secrets/external-secrets-339-add-support-for-google-cloud-identity.json b/solutions/cncf-generated/external-secrets/external-secrets-339-add-support-for-google-cloud-identity.json new file mode 100644 index 00000000..9d3b33fd --- /dev/null +++ b/solutions/cncf-generated/external-secrets/external-secrets-339-add-support-for-google-cloud-identity.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:12.072Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "external-secrets: Add support for Google Cloud Identity", + "description": "If the name of the service account secret is kept\nempty, this means we want to use Google Cloud Identity\nto authenticate against the GCP project", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "I did not know that the client just falls down to workload identity by default, which makes it very clean at the end. 
That's dope!\n\nI am thinking here and maybe it would make sense to just make auth key optional only for gcp then? Like:\n```\ntype GCPSMProvider struct {\n\t// Auth defines the information necessary to authenticate against GCP\n+\t// +optional\n\tAuth GCPSMAuth `json:\"auth\"`\n\n\t// ProjectID project where secret is located\n\tProjectID string `json:\"projectID,omitempty\"`\n}\n```\n\nAnd then when we want to use GCP-WI we just create a store passing just the projectID.\n\n```\napiVersion: external-secrets.io/v1alpha1\nkind: SecretStore\nmetadata:\n name: example\nspec:\n provider:\n gcpsm:\n- auth:\n- secretRef:\n- secretAccessKeySecretRef:\n- name: gcpsm-secret\n- key: secret-access-credentials\n projectID: pid\n```\n\n.\n\nWhat do you think?\n\nPlease run `make check-diff` and `make fmt` whenever you open a PR :)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "type GCPSMProvider struct {\r\n\t// Auth defines the information necessary to authenticate against GCP\r\n+\t// +optional\r\n\tAuth GCPSMAuth `json:\"auth\"`\r\n\r\n\t// ProjectID project where secret is located\r\n\tProjectID string `json:\"projectID,omitempty\"`\r\n}", + "apiVersion: external-secrets.io/v1alpha1\r\nkind: SecretStore\r\nmetadata:\r\n name: example\r\nspec:\r\n provider:\r\n gcpsm:\r\n- auth:\r\n- secretRef:\r\n- secretAccessKeySecretRef:\r\n- name: gcpsm-secret\r\n- key: secret-access-credentials\r\n projectID: pid" + ] + } + }, + "metadata": { + "tags": [ + "external-secrets", + "sandbox", + "security" + ], + "category": "security", + "cncfProjects": [ + "external-secrets" + ], + "targetResourceKinds": [ + "Service", + "Secret" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/external-secrets/external-secrets/pull/339", + "sourceRepo": "external-secrets/external-secrets", + "reactions": 5, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:48:12.072Z", + 
"scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/external-secrets/external-secrets-3477-infisical-provider.json b/solutions/cncf-generated/external-secrets/external-secrets-3477-infisical-provider.json new file mode 100644 index 00000000..2b1c3736 --- /dev/null +++ b/solutions/cncf-generated/external-secrets/external-secrets-3477-infisical-provider.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:13.375Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "external-secrets: Infisical provider", + "description": "## Problem Statement\n\n1. Implemented infisical provider\n\n## Related Issue\n\nResolves #3466 \n\n## Proposed Changes\n\nAdded a new provider\n\n## Checklist\n\n- [x] I have read the [contribution guidelines](https://external-secrets.io/latest/contributing/process/#submitting-a-pull-request)\n- [x] All commits are signed with `git commit --signoff`\n- [x] My changes have reasonable test coverage\n- [x] All tests pass with `make test` \n- [x] I ensured my PR is ready for review with `make reviewable`", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "hi @akhilmhdh . Can you please update the `stability-support` matrix with the expected features that this provider will have? 
I see the code is missing several features there, and I cannot really tell if it is by design or not.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "external-secrets", + "sandbox", + "security" + ], + "category": "security", + "cncfProjects": [ + "external-secrets" + ], + "targetResourceKinds": [ + "Secret" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/external-secrets/external-secrets/pull/3477", + "sourceRepo": "external-secrets/external-secrets", + "reactions": 5, + "comments": 22 + }, + "security": { + "scannedAt": "2026-02-27T17:48:13.375Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/external-secrets/external-secrets-3603-feat-add-bitwarden-secret-manager-support.json b/solutions/cncf-generated/external-secrets/external-secrets-3603-feat-add-bitwarden-secret-manager-support.json new file mode 100644 index 00000000..acafccb0 --- /dev/null +++ b/solutions/cncf-generated/external-secrets/external-secrets-3603-feat-add-bitwarden-secret-manager-support.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:10.933Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "external-secrets: feat: add bitwarden secret manager support", + "description": "## Problem Statement\n\nBitwarden Secret Manager client.\n\n## Related Issue\n\nFixes https://github.com/external-secrets/external-secrets/issues/2661\n\n## Proposed Changes\n\nHow do you like to solve the issue and why?\n\n## Checklist\n\n- [x] I have read the [contribution guidelines](https://external-secrets.io/latest/contributing/process/#submitting-a-pull-request)\n- [x] All commits are signed with `git commit --signoff`\n- [x] My changes have reasonable test coverage\n- [x] All tests pass with `make test`\n- [x] I ensured my 
PR is ready for review with `make reviewable`", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "> ℹ️ **Note**\n> \n> This PR body was truncated due to platform limits.\n\nThis PR contains the following updates:\n\n| Package | Update | Change |\n|---|---|---|\n| [external-dns](https://bitnami.com) ([source](https://redirect.github.com/bitnami/charts/tree/HEAD/bitnami/external-dns)) | minor | `7.0.0` → `7.5.7` |\n| [external-secrets/external-secrets](https://redirect.github.com/external-secrets/external-secrets) | minor | `v0.9.0` → `v0.20.4` |\n\n---\n\n### Release Notes\n\n
\nbitnami/charts (external-dns)\n\n### [`v7.5.7`](https://redirect.github.com/bitnami/charts/blob/HEAD/bitnami/external-dns/CHANGELOG.md#small757-2024-06-18-small)\n\n- \\[bitnami/external-dns] Release 7.5.7 ([#​27341](https://redirect.github.com/bitnami/charts/issues/27341)) ([6668b3c](https://redirect.github.com/bitnami/charts/commit/6668b3c1ae632eb90b6e825fedddc39637aec137)), closes [#​27341](https://redirect.github.com/bitnami/charts/issues/27341)\n\n### [`v7.5.6`](https://redirect.github.com/bitnami/charts/blob/HEAD/bitnami/external-dns/CHANGELOG.md#small756-2024-06-17-small)\n\n- \\[bitnami/external-dns] Release 7.5.6 ([#​27216](https://redirect.github.com/bitnami/charts/issues/27216)) ([cfe95c9](https://redirect.github.com/bitnami/charts/commit/cfe95c9c1d2c0e6b8e3f1b574f992f47486bc91a)), closes [#​27216](https://redirect.github.com/bitnami/charts/issues/27216)\n\n### [`v7.5.5`](https://redirect.github.com/bitnami/charts/blob/HEAD/bitnami/external-dns/CHANGELOG.md#small75", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "apiVersion: external-secrets.io/v1beta1\r\nkind: ExternalSecret\r\nmetadata:\r\n name: bitwarden\r\nspec:\r\n refreshInterval: 10s\r\n secretStoreRef:\r\n # This name must match the metadata.name in the `SecretStore`\r\n name: bitwarden-secretsmanager\r\n kind: SecretStore\r\n data:\r\n - secretKey: test\r\n remoteRef:\r\n key: \"test2\"\r\n property: \"f5847eef-2f89-43bc-885a-b18a01178e3e\"", + "apiVersion: v1\r\ndata:\r\n test: c2VjcmV0 # test\r\nimmutable: false\r\nkind: Secret\r\nmetadata:\r\n name: bitwarden\r\n namespace: default\r\ntype: Opaque", + "apiVersion: external-secrets.io/v1alpha1\r\nkind: PushSecret\r\nmetadata:\r\n name: pushsecret-bitwarden # Customisable\r\nspec:\r\n refreshInterval: 10s # Refresh interval for which push secret will reconcile\r\n secretStoreRefs: # A list of secret stores to push secrets to\r\n - name: 
bitwarden-secretsmanager\r\n kind: SecretStore\r\n selector:\r\n secret:\r\n name: my-secret # Source Kubernetes secret to be pushed\r\n data:\r\n - match:\r\n secretKey: test4 # Source Kubernetes secret key to be pushed\r\n remoteRef:\r\n remoteKey: test4 # Remote reference (where the secret is going to be pushed)\r\n property: f5847eef-2f89-43bc-885a-b18a01178e3e\r\n metadata:\r\n note: \"Note of the secret to add.\"" + ] + } + }, + "metadata": { + "tags": [ + "external-secrets", + "sandbox", + "security" + ], + "category": "security", + "cncfProjects": [ + "external-secrets" + ], + "targetResourceKinds": [ + "Secret" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/external-secrets/external-secrets/pull/3603", + "sourceRepo": "external-secrets/external-secrets", + "reactions": 13, + "comments": 3 + }, + "security": { + "scannedAt": "2026-02-27T17:48:10.933Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/external-secrets/external-secrets-4538-feat-aws-support-for-aws-tags.json b/solutions/cncf-generated/external-secrets/external-secrets-4538-feat-aws-support-for-aws-tags.json new file mode 100644 index 00000000..8d6a7a95 --- /dev/null +++ b/solutions/cncf-generated/external-secrets/external-secrets-4538-feat-aws-support-for-aws-tags.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:17.937Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "external-secrets: feat(aws): support for aws tags", + "description": "## Problem Statement\n\nWhat is the problem you're trying to solve?\n\n## Related Issue\n\nPartially resolves #1821 . Provides a capability to create secrets with KSM key, Description and Tags. 
\n\nIt does not:\n- modify existing keys if/when tags added/removed\n- provide a way to configure capability to set KSM resource policy\n- secret replication in other region\n\n## Proposed Changes\n\nHow do you like to solve the issue and why?\n\nAdded support to create a secret with\n- tags\n- description\n- kms key default and non-default\n\nvery similar to \n- azure https://github.com/external-secrets/external-secrets/pull/4507\n- aws parameter store https://github.com/external-secrets/external-secrets/blob/63740fcbfd834734e991d887421e6c52f8c11a8c/pkg/provider/aws/parameterstore/parameterstore.go#L583\n\nThere going to be a breaking change, as before it was \n```yml\nmetada:\n secretPushFormat: string\n```\n\nnow\n```yml\n metadata:\n apiVersion: kubernetes.external-secrets.io/v1alpha1\n kind: PushSecretMet", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## Problem Statement\n\nWhat is the problem you're trying to solve?\n- at the moment, tags only applied to secrets on secret creation.\n- Secret manager to support patch/update/delete tags (aka full lifecycle) https://github.com/external-secrets/external-secrets/issues/1821#issuecomment-2848624074\n- Initially I've added only attach tags on secret creation https://github.com/external-secrets/external-secrets/pull/4538 the plan was to add update/delete right after aws-sdk bump to v2\n- small fix \n\"Screenshot\n\n## Related Issue\n\nFixes #4538\n\n## Proposed Changes\n\nAdded support for tags lifecycle\n\n## Checklist\n\n- [x] I have read the [contribution guidelines](https://external-secrets.io/latest/contributing/process/#submitting-a-pull-request)\n- [x] All commits are signed with `git commit --signoff`\n- [x] My changes have reasonable test coverage\n- [x] All tests pass with `make test`\n- [x] I ensured my PR is ready for review with `make reviewable`\n\nManifest\n\n```yml\n---\napiVersion: v1\nkind: Namespace\nmetadata:\n name: external-secrets\n---\n# 
https://external-secrets.io/latest/api/secretstore/\napiVersion: external-secrets.io/v1\nkind: SecretStore\nmetadata:\n name: aws-secretstore\n namespace: external-secrets\nspec:\n provider:\n aws:\n service: SecretsManager\n region: eu-west-1\n secretsManager:\n forceDeleteWithoutRecovery: tru", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "metada:\r\n secretPushFormat: string", + "metadata:\r\n apiVersion: kubernetes.external-secrets.io/v1alpha1\r\n kind: PushSecretMetadata\r\n spec:\r\n secretPushFormat: string # When not set, default to binary", + "apiVersion: external-secrets.io/v1beta1\r\nkind: SecretStore\r\nmetadata:\r\n name: secretstore-sample-ik\r\nspec:\r\n provider:\r\n aws:\r\n service: SecretsManager\r\n region: eu-west-1\r\n secretsManager:\r\n forceDeleteWithoutRecovery: true\r\n auth:\r\n secretRef:\r\n accessKeyIDSecretRef:\r\n name: awssm-secret\r\n key: access-key\r\n secretAccessKeySecretRef:\r\n name: awssm-secret\r\n key: secret-access-key\r\n---\r\napiVersion: generators.external-secrets.io/v1alpha1\r\nkind: Password\r\nmetadata:\r\n name: my-password\r\n namespace: external-secrets\r\nspec:\r\n length: 12\r\n digits: 5\r\n symbols: 5\r\n symbolCharacters: \"-_$@\"\r\n noUpper: false\r\n allowRepeat: true\r\n---\r\napiVersion: external-secrets.io/v1alpha1\r\nkind: PushSecret\r\nmetadata:\r\n name: pushsecret-to-aws-example # Customisable\r\n namespace: external-secrets # Same of the SecretStores\r\n labels:\r\n this-is-the-label: \"lol\"\r\n annotations:\r\n this-is-the-annotation: \"haha\"\r\nspec:\r\n deletionPolicy: Delete\r\n refreshInterval: 1m # Refresh interval for which push secret will reconcile\r\n secretStoreRefs: # A list of secret stores to push secrets to\r\n - name: secretstore-sample-ik\r\n kind: SecretStore\r\n selector:\r\n generatorRef:\r\n apiVersion: generators.external-secrets.io/v1alpha1\r\n kind: Password\r\n name: my-password\r\n 
template:\r\n metadata:\r\n annotations:\r\n a-key2: value1\r\n labels:\r\n l-key2: value1\r\n pp.kubernetes.io/part-of: testing\r\n data:\r\n - conversionStrategy: None\r\n match:\r\n secretKey: password # Source Kubernetes secret key to be pushed\r\n remoteRef:\r\n remoteKey: teamb-my-first-parameter-6 # Remote reference (where the secret is going to be pushed)\r\n metadata:\r\n apiVersion: kubernetes.external-secrets.io/v1alpha1\r\n kind: PushSecretMetadata\r\n spec:\r\n kmsKeyID: bb123123-b2b0-4f60-ac3a-44a13f0e6b6c\r\n secretPushFormat: string\r\n description: \"this is key description\"\r\n tags: # Tags to be added to the secret in Azure Key Vault\r\n secret-store: teamb-secret-store\r\n refresh-interval: 1h" + ] + } + }, + "metadata": { + "tags": [ + "external-secrets", + "sandbox", + "security" + ], + "category": "security", + "cncfProjects": [ + "external-secrets" + ], + "targetResourceKinds": [ + "Service", + "Secret", + "Namespace" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/external-secrets/external-secrets/pull/4538", + "sourceRepo": "external-secrets/external-secrets", + "reactions": 4, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:48:17.937Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/external-secrets/external-secrets-4628-feat-add-1password-sdk-based-provider.json b/solutions/cncf-generated/external-secrets/external-secrets-4628-feat-add-1password-sdk-based-provider.json new file mode 100644 index 00000000..2a881623 --- /dev/null +++ b/solutions/cncf-generated/external-secrets/external-secrets-4628-feat-add-1password-sdk-based-provider.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:19.203Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "external-secrets: feat: add 1Password SDK based provider", + "description": "## Problem 
Statement\n\n1Password using the SDK\n\n## TODOs\n\n- [x] Unit testing\n- [x] Documentation\n- [x] Update support table\n\n## Related Issue\n\nFixes https://github.com/external-secrets/external-secrets/issues/3655\n\n## Proposed Changes\n\nHow do you like to solve the issue and why?\n\n## Checklist\n\n- [ ] I have read the [contribution guidelines](https://external-secrets.io/latest/contributing/process/#submitting-a-pull-request)\n- [ ] All commits are signed with `git commit --signoff`\n- [ ] My changes have reasonable test coverage\n- [ ] All tests pass with `make test`\n- [ ] I ensured my PR is ready for review with `make reviewable`", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "I mean, I think so! v1 promotion is literally ready to go (aside from reviews of course)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "apiVersion: external-secrets.io/v1\r\nkind: SecretStore\r\nmetadata:\r\n name: onepassword\r\nspec:\r\n provider:\r\n onepasswordSDK:\r\n vault: TestVault\r\n auth:\r\n serviceAccountSecretRef:\r\n name: onepassword-token\r\n key: token", + "apiVersion: external-secrets.io/v1\r\nkind: ExternalSecret\r\nmetadata:\r\n name: fetch-from-onepassword\r\nspec:\r\n secretStoreRef:\r\n kind: SecretStore\r\n name: onepassword\r\n target:\r\n creationPolicy: Owner\r\n data:\r\n - secretKey: test-login-1\r\n remoteRef:\r\n key: test-login-1/username", + "Normal Synced 9s pushsecret PushSecret synced successfully" + ] + } + }, + "metadata": { + "tags": [ + "external-secrets", + "sandbox", + "security" + ], + "category": "security", + "cncfProjects": [ + "external-secrets" + ], + "targetResourceKinds": [ + "Secret" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/external-secrets/external-secrets/pull/4628", + "sourceRepo": "external-secrets/external-secrets", + "reactions": 4, + "comments": 12 + }, + "security": { + 
"scannedAt": "2026-02-27T17:48:19.203Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/external-secrets/external-secrets-5470-feat-dynamic-target-implementation-for-external-secrets-so.json b/solutions/cncf-generated/external-secrets/external-secrets-5470-feat-dynamic-target-implementation-for-external-secrets-so.json new file mode 100644 index 00000000..bb210b99 --- /dev/null +++ b/solutions/cncf-generated/external-secrets/external-secrets-5470-feat-dynamic-target-implementation-for-external-secrets-so.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:16.272Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "external-secrets: feat: dynamic target implementation for external secrets sources ", + "description": "## Problem Statement\n\nThis is an implementation of syncing to a custom resource https://github.com/external-secrets/external-secrets/blob/main/design/012-sync-to-custom-resource.md.\n\nThe following changes have been applied to the external secret controller and the external secret object:\n\n- added manifests to ES to signify what kind of object needs to be tracked\n- overhauled the templating to accommodate unstructured objects instead of just secrets\n- added dynamic watches using informers to all the object kinds ( this implementation registered and deregisters informers based on how many usages are for the current used GVK )\n- modified the external secret controller to check non-secret objects\n- everything else should work the same way as with secrets ( meaning ownership, merging, updating, deleting )\n- added documentation\n- added a feature flag to put all of this behind\n- updated the helm values files to include custom RBAC access for the custom objects\n\n## Related Issue\n\n## Proposed C", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": 
"Should we add a `ConfigMap` test case to e2e? I think this is big enough that it is probably worth it. Thoughts?\n\nMaybe not for all providers, but at least for `vault` which we can run locally", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "feat(scope): add new feature\r\nfix(scope): fix bug\r\ndocs(scope): update documentation\r\nchore(scope): update build tool or dependencies\r\nref(scope): refactor code\r\nclean(scope): provider cleanup\r\ntest(scope): add tests\r\nperf(scope): improve performance\r\ndesig(scope): improve design", + "{\"level\":\"info\",\"ts\":1761594522.664298,\"logger\":\"controllers.ExternalSecret.informer-manager\",\"msg\":\"registered ExternalSecret with existing informer\",\"gvk\":\"/v1, Kind=ConfigMap\",\"externalSecret\":{\"name\":\"mfa-generator-es\",\"namespace\":\"default\"},\"totalUsers\":2}", + "{\"level\":\"info\",\"ts\":1761594564.7881262,\"logger\":\"controllers.ExternalSecret.informer-manager\",\"msg\":\"registered ExternalSecret with existing informer\",\"gvk\":\"/v1, Kind=ConfigMap\",\"externalSecret\":{\"name\":\"templated-config\",\"namespace\":\"default\"},\"totalUsers\":2}" + ] + } + }, + "metadata": { + "tags": [ + "external-secrets", + "sandbox", + "security", + "kind-feature", + "kind-documentation", + "size-l" + ], + "category": "security", + "cncfProjects": [ + "external-secrets" + ], + "targetResourceKinds": [ + "Secret" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/external-secrets/external-secrets/pull/5470", + "sourceRepo": "external-secrets/external-secrets", + "reactions": 5, + "comments": 32 + }, + "security": { + "scannedAt": "2026-02-27T17:48:16.272Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/external-secrets/external-secrets-5930-chore-update-controller-runtime.json 
b/solutions/cncf-generated/external-secrets/external-secrets-5930-chore-update-controller-runtime.json new file mode 100644 index 00000000..dc33a9c5 --- /dev/null +++ b/solutions/cncf-generated/external-secrets/external-secrets-5930-chore-update-controller-runtime.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:23.946Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "external-secrets: chore: update controller runtime", + "description": "## Problem Statement\n\nJesus this was painful.\n\n## Related Issue\n\nFixes https://github.com/external-secrets/external-secrets/issues/5926\n\n## Proposed Changes\n\nHow do you like to solve the issue and why?\n\n## Format\n\nPlease ensure that your PR follows the following format for the title:\n```\nfeat(scope): add new feature\nfix(scope): fix bug\ndocs(scope): update documentation\nchore(scope): update build tool or dependencies\nref(scope): refactor code\nclean(scope): provider cleanup\ntest(scope): add tests\nperf(scope): improve performance\ndesig(scope): improve design\n```\n\nWhere `scope` is _optionally_ one of:\n- charts\n- release\n- testing\n- security\n- templating\n\n## Checklist\n\n- [ ] I have read the [contribution guidelines](https://external-secrets.io/latest/contributing/process/#submitting-a-pull-request)\n- [ ] All commits are signed with `git commit --signoff`\n- [ ] My changes have reasonable test coverage\n- [ ] All tests pass with `make test`\n- [ ] I ensured my PR is ready for review with `make ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## Problem Statement\n\nThe external-secrets api module depends on v0.34 of the k8s controller runtime, which can lead to dependency conflicts with other components.\n\n## Related Issue\n\nFixes #...\n\n## Proposed Changes\n\nBump the k8s api dependencies to 0.35.0. 
This includes a change to how webhooks are registered,\nrequiring that the apitype be passed to `ctrl.NewWebhookManagedBy`.\n\nThis change made it awkward to use the GenericStoreValidator, essentially because the\nwebhookBuilder.WithValidator now uses generics, but we can't add generics to GenericStoreValidator makes it incompatible with kubebuilder code-gen.\n\nAs a workaround, I've removed GenericStoreValidator and added explicit types for SecretStoreValidator and ClusterSecretStoreValidator.\n\n## Format\n\nPlease ensure that your PR follows the following format for the title:\n```\nfeat(scope): add new feature\nfix(scope): fix bug\ndocs(scope): update documentation\nchore(scope): update build tool or dependencies\nref(scope): refactor code\nclean(scope): provider cleanup\ntest(scope): add tests\nperf(scope): improve performance\ndesig(scope): improve design\n```\n\nWhere `scope` is _optionally_ one of:\n- charts\n- release\n- testing\n- security\n- templating\n\n## Checklist\n\n- [x] I have read the [contribution guidelines](https://external-secrets.io/latest/contributing/process/#submitting-a-pull-request)\n- [x] All commits are signed with `git commit --signoff`\n- [x] My changes have reasonable test coverage\n- [x] All tests pass with `make test`\n- [ ", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "feat(scope): add new feature\r\nfix(scope): fix bug\r\ndocs(scope): update documentation\r\nchore(scope): update build tool or dependencies\r\nref(scope): refactor code\r\nclean(scope): provider cleanup\r\ntest(scope): add tests\r\nperf(scope): improve performance\r\ndesig(scope): improve design", + "feat(scope): add new feature\r\nfix(scope): fix bug\r\ndocs(scope): update documentation\r\nchore(scope): update build tool or dependencies\r\nref(scope): refactor code\r\nclean(scope): provider cleanup\r\ntest(scope): add tests\r\nperf(scope): improve performance\r\ndesig(scope): 
improve design" + ] + } + }, + "metadata": { + "tags": [ + "external-secrets", + "sandbox", + "security", + "kind-documentation", + "kind-dependency", + "kind-chore", + "size-xl" + ], + "category": "security", + "cncfProjects": [ + "external-secrets" + ], + "targetResourceKinds": [ + "Secret" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/external-secrets/external-secrets/pull/5930", + "sourceRepo": "external-secrets/external-secrets", + "reactions": 3, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:48:23.946Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/external-secrets/external-secrets-783-gcp-allow-cluster-to-be-in-different-project.json b/solutions/cncf-generated/external-secrets/external-secrets-783-gcp-allow-cluster-to-be-in-different-project.json new file mode 100644 index 00000000..8383cdfb --- /dev/null +++ b/solutions/cncf-generated/external-secrets/external-secrets-783-gcp-allow-cluster-to-be-in-different-project.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:20.245Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "external-secrets: GCP: allow cluster to be in different project", + "description": "Fixes #772 \n\nInitial proposal of begin able to have your GKE cluster in a different project that where the GCPSM is. I did not yet update any docs or tests. 
Input / help is welcome\n\nIntend for now is to be backward compatible so this feature can be released without any blockers", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Yeah, I agree, since the code is already defaulting to Provider.GCPSM.ProjectID, we should make the inner field optional", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "// get clusterProjectID from workload identity spec but default to Provider.GCPSM.ProjectID\r\n\tvar clusterProjectID string\r\n\tif wi.ClusterProjectID != \"\" {\r\n\t\tclusterProjectID = wi.ClusterProjectID\r\n\t} else { // never reached since v1beta1 asks this as mandatory parameter.\r\n\t\tclusterProjectID = spec.Provider.GCPSM.ProjectID \r\n\t}" + ] + } + }, + "metadata": { + "tags": [ + "external-secrets", + "sandbox", + "security" + ], + "category": "security", + "cncfProjects": [ + "external-secrets" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/external-secrets/external-secrets/pull/783", + "sourceRepo": "external-secrets/external-secrets", + "reactions": 3, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:48:20.245Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/external-secrets/external-secrets-917-feat-replace-prometheus-annotations-with-servicemonitor.json b/solutions/cncf-generated/external-secrets/external-secrets-917-feat-replace-prometheus-annotations-with-servicemonitor.json new file mode 100644 index 00000000..ad106fa4 --- /dev/null +++ b/solutions/cncf-generated/external-secrets/external-secrets-917-feat-replace-prometheus-annotations-with-servicemonitor.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:14.793Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + 
"mission": { + "title": "external-secrets: feat: replace prometheus annotations with servicemonitor", + "description": "This PR replaces prometheus annotations with ServiceMonitor implementation.\n\nchanges:\n- remove config option to specify metrics port (it's not directly configurable through helmchart, and there should be no need to)\n- add dedicated `$component-servicemonitor.yaml` file that contains service + servicemonitor for each component \n\nScreenshot with all enabled service monitors\n![poc-metrics](https://user-images.githubusercontent.com/1709030/160910429-5bb80fef-404a-404b-bdb3-34c76c1de93c.png)\n\nSupersedes #779 \nFixes #361", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "See #361", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "helm install prometheus prometheus-community/kube-prometheus-stack\r\nhelm upgrade --install eso --debug --wait --timeout=1m \\\r\n ./deploy/charts/external-secrets \\\r\n --set installCRDs=true --set serviceMonitor.enabled=true \\\r\n --set serviceMonitor.additionalLabels.release=prometheus \\\r\n --set webhook.serviceMonitor.enabled=true \\\r\n --set webhook.serviceMonitor.additionalLabels.release=prometheus \\\r\n --set certController.serviceMonitor.enabled=true \\\r\n --set certController.serviceMonitor.additionalLabels.release=prometheus\r\n\r\n# then \r\nkubectl port-forward svc/prometheus-kube-prometheus-prometheus 9090:9090\r\n\r\n# visit: http://localhost:9090/targets", + "{{- if .Values.prometheus.enabled }}\r\nThe flag `prometheus.enabled` is deprecated and will be removed in the next release. 
Please use `servicemonitor.enabled` instead.\r\n{{- end }}", + "[...]\r\nexternal-secrets has been deployed successfully!\r\n\r\nIn order to begin using ExternalSecrets, you will need to set up a SecretStore\r\nor ClusterSecretStore resource (for example, by creating a 'vault' SecretStore).\r\n\r\nMore information on the different types of SecretStores and how to configure them\r\ncan be found in our Github: https://github.com/external-secrets/external-secrets\r\n\r\ndeprecation warning:\r\n> The flag `prometheus.enabled` is deprecated and will be removed in the next release.\r\n Please migrate to using servicemonitor instead." + ] + } + }, + "metadata": { + "tags": [ + "external-secrets", + "sandbox", + "security" + ], + "category": "security", + "cncfProjects": [ + "external-secrets" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/external-secrets/external-secrets/pull/917", + "sourceRepo": "external-secrets/external-secrets", + "reactions": 5, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:48:14.793Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/falco/falco-1442-falco-to-build-on-aarch64.json b/solutions/cncf-generated/falco/falco-1442-falco-to-build-on-aarch64.json new file mode 100644 index 00000000..fd94dcaf --- /dev/null +++ b/solutions/cncf-generated/falco/falco-1442-falco-to-build-on-aarch64.json @@ -0,0 +1,53 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:47.023Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "falco: Falco to build on aarch64", + "description": "**What type of PR is this?**\n\n/kind feature\n\n**Any specific area of the project related to this PR?**\n\n/area build\n\n**What this PR does / why we need it**:\n\nWhat happens here:\n\n- [x] libscap, libsinsp and drivers updated to the 
`fntlnz-aarch64` branch, this is because we needed to add some preprocessor gates for aarch64. **This needs to be changed with the specific commit id and SHA before merging** https://github.com/draios/sysdig/pull/1701\n- [x] luajit updated to git tag `1d8b747c161db457e032a023ebbff511f5de5ec2` - this is because luajit didn't tag since 2017, but they are actively developing it. Initially we wanted to switch to moonjit to support also ppc64le but the new luajit commits also address that along with the 64 bit changes needed for arm64. Please\n- [x] gRPC updated to 1.32.0 [read more](https://github.com/falcosecurity/falco/pull/1442/commits/358851de1132e9495ddd037394124d75bace1ceb)\n- [x] cpack fixes to produce aarch64 packees\n- CI build targets - opened an issue for thi", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This also supersedes the changes made for ppc64le https://github.com/falcosecurity/falco/pull/1225 since the only change needed here was to use moonjit instead of luajit. Many projects and OSes (like [Alpine](https://pkgs.alpinelinux.org/package/edge/main/x86/luajit-dev)) already use moonjit as a synonim of luajit.\n\nThis will likely require some testing but the next release is on December 1st so there's no better time than now.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "This also supersedes the changes made for ppc64le https://github.com/falcosecurity/falco/pull/1225 since the only change needed here was to use moonjit instead of luajit. 
Many projects and OSes (like [Alpine](https://pkgs.alpinelinux.org/package/edge/main/x86/luajit-dev)) already use moonjit as a synonim of luajit.\r\n\r\nThis will likely require some testing but the next release is on December 1st so there's no better time than now.\nThis PR also updates libsinsp and libscap, it's important to note that those already depend on moonjit for certain architectures and that there was a breaking change that needed to be addressed: https://github.com/draios/sysdig/pull/1693\nNote: 2.1.2 seems to be the latest moonjit that works for our lua code.\r\n\r\nTried to update to 2.2.0 and we start having issues. I know we want to start making plans to remove lua as we already did for the outputs in #1412 so I'll just note this here and we can follow up on that if we don't make such plan in the near future." + ] + } + }, + "metadata": { + "tags": [ + "falco", + "graduated", + "security", + "kind-feature", + "size-xl", + "release-note", + "dco-signoff--yes", + "lgtm", + "approved", + "do-not-merge-hold", + "area-build" + ], + "category": "security", + "cncfProjects": [ + "falco" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/falcosecurity/falco/pull/1442", + "sourceRepo": "falcosecurity/falco", + "reactions": 4, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:46:47.023Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/falco/falco-1758-sign-packages-and-linux-repositories-with-sha256.json b/solutions/cncf-generated/falco/falco-1758-sign-packages-and-linux-repositories-with-sha256.json new file mode 100644 index 00000000..db5db435 --- /dev/null +++ b/solutions/cncf-generated/falco/falco-1758-sign-packages-and-linux-repositories-with-sha256.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:48.050Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": 
"auto-generated", + "mission": { + "title": "falco: Sign Packages and Linux Repositories with SHA256.", + "description": "**What type of PR is this?**\n\n> Uncomment one (or more) `/kind <>` lines:\n\n> /kind bug\n\n> /kind cleanup\n\n> /kind design\n\n> /kind documentation\n\n> /kind failing-test\n\n/kind feature\n\n> If contributing rules or changes to rules, please make sure to also uncomment one of the following line:\n\n> /kind rule-update\n\n> /kind rule-create\n\n**Any specific area of the project related to this PR?**\n\n> Uncomment one (or more) `/area <>` lines:\n\n/area build\n\n> /area engine\n\n> /area rules\n\n> /area tests\n\n> /area proposals\n\n**What this PR does / why we need it**:\nCurrently, Falco Linux packages are signed with SHA1. This prevents the installation of Falco on high-assurance systems that do not allow for the installation of packages without signatures made using SHA256.\n\n**Which issue(s) this PR fixes**:\n\nFixes #1751\n\n**Special notes for your reviewer**:\nThe SHA256 algorithms should be well supported by Linux distributions in use today. 
I do not believe this this will break installs for anyone.\n\n**Does th", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Closing and re-opening to trigger the CI\n/close", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "falco", + "graduated", + "security", + "kind-feature", + "release-note", + "dco-signoff--yes", + "lgtm", + "size-s", + "approved", + "area-build" + ], + "category": "security", + "cncfProjects": [ + "falco" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/falcosecurity/falco/pull/1758", + "sourceRepo": "falcosecurity/falco", + "reactions": 4, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:46:48.050Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/falco/falco-1997-circle-ci-build-job-for-arm64.json b/solutions/cncf-generated/falco/falco-1997-circle-ci-build-job-for-arm64.json new file mode 100644 index 00000000..77b4cb4d --- /dev/null +++ b/solutions/cncf-generated/falco/falco-1997-circle-ci-build-job-for-arm64.json @@ -0,0 +1,56 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:49.216Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "falco: Circle CI build job for ARM64", + "description": "**What type of PR is this?**\n\n/kind feature\n\n**Any specific area of the project related to this PR?**\n\n/area build\n\n**What this PR does / why we need it**:\nAdded Circle CI job to build Falco for ARM64 platform. 
\n\n**Which issue(s) this PR fixes**:\n\nFixes #1891 \n\n**Special notes for your reviewer**:\n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\nNONE\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/milestone 0.33.0", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Welcome @odidev! It looks like this is your first PR to falcosecurity/falco 🎉\nSo, we surely need to port `publish-deb` script.\r\nAs you can see, `publish-bin` already takes architecture as argument. I think we must do the same with `publish-deb`:\r\nbasically, on this line: https://github.com/falcosecurity/falco/blob/master/scripts/publish-deb#L41 you must check the external argument provided instead of declaring a local variable.\r\n\r\n`publish-rpm` does not seem to reference `x86_64` or any `uname` variable, therefore i guess we shouldn't touch it.\r\n\r\nI will need someone else to review the workflow too :) It seems fine to me:\r\n## Build and test workflow\r\n* we have the `build-arm64` job that builds and stores artifacts for arm64\r\n* `build-arm64` is required by `rpm-sign` that is itself required by `publish/packages-dev`\r\n* `build-arm64` is also required by `publish/packages-deb-dev`", + "## Release workflow\r\n* we have the `build-arm64` job that builds and stores artifacts for arm64\r\n* `build-arm64` is required by `rpm-sign` that is itself required by `publish/packages`\r\n* `build-arm64` is also required by `publish/packages-deb`", + "EDIT: i also left a couple of comments that need to be addressed :) \r\nThanks btw!!\nTo maintainers: note that the `build-arm64` job will never work unless the `falcosecturity/falco-builder:latest` image is available for arm64; that's part of #1990!\n@odidev it seems like the new v2.1 workflows version is breaking the build:" + ] + } + }, + "metadata": { + "tags": [ + "falco", + "graduated", + "security", + 
"kind-feature", + "dco-signoff--yes", + "lgtm", + "release-note-none", + "approved", + "size-l", + "area-build" + ], + "category": "security", + "cncfProjects": [ + "falco" + ], + "targetResourceKinds": [ + "Job" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/falcosecurity/falco/pull/1997", + "sourceRepo": "falcosecurity/falco", + "reactions": 3, + "comments": 26 + }, + "security": { + "scannedAt": "2026-02-27T17:46:49.216Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/falco/falco-2353-fix-falco-driver-loader-source-only-print-env-vars.json b/solutions/cncf-generated/falco/falco-2353-fix-falco-driver-loader-source-only-print-env-vars.json new file mode 100644 index 00000000..e8a84648 --- /dev/null +++ b/solutions/cncf-generated/falco/falco-2353-fix-falco-driver-loader-source-only-print-env-vars.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:50.170Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "falco: fix: falco-driver-loader source-only print env vars", + "description": "**What type of PR is this?**\n\n/kind bug\n\n**Any specific area of the project related to this PR?**\n\n**What this PR does / why we need it**:\n\n**Which issue(s) this PR fixes**:\n\nFixes #2352\n\n**Special notes for your reviewer**:\n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\nnew(falco-driver-loader): --source-only now prints the values as env vars\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "I like it. I see some failing tests, any clue? 
:thinking:", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "falco", + "graduated", + "security", + "kind-bug", + "release-note", + "dco-signoff--yes", + "lgtm", + "approved", + "size-l" + ], + "category": "security", + "cncfProjects": [ + "falco" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/falcosecurity/falco/pull/2353", + "sourceRepo": "falcosecurity/falco", + "reactions": 3, + "comments": 24 + }, + "security": { + "scannedAt": "2026-02-27T17:46:50.170Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/falco/falco-2501-new-ci-gha-master-and-release-workflows.json b/solutions/cncf-generated/falco/falco-2501-new-ci-gha-master-and-release-workflows.json new file mode 100644 index 00000000..c6f1a80c --- /dev/null +++ b/solutions/cncf-generated/falco/falco-2501-new-ci-gha-master-and-release-workflows.json @@ -0,0 +1,57 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:51.715Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "falco: new(ci): gha master and release workflows", + "description": "**What type of PR is this?**\n\n/kind feature\n\n**Any specific area of the project related to this PR?**\n\n/area CI\n\n**What this PR does / why we need it**:\n\nThis PR tries to enable master and release build + packages + docker images CI on gha instead of circleci.\nIt surely needs some tests hence the `wip`.\n\nTo do so, it uses `reusable` workflows that are shared between master and release; therefore testing CI on master should be enough to test also the release one, since they are exactly the same.\nMoreover, people could also reuse our provided workflows in their own CI.\n\n**Which issue(s) this PR fixes**:\n\nFixes #1876 \n\n**Special notes for your 
reviewer**:\n\nSteps:\n- [x] fix arm64 build\n- [x] fix arm64 job build Falco version (it sees 0.0.0 somehow...)\n- [x] add GPG_KEY as gha variable (needs repo admin)\n- [x] fix master and release yaml issues\n- [x] understand how to docker in docker (reusable_build_docker workflow) -> just go on host. Tested that `docker` and `awscli` can be used in those", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "I decided to split dev and release so that the master CI is not able to push anything to the release packages.\n\nSee https://github.com/falcosecurity/falco/pull/2501", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "I decided to split dev and release so that the master CI is not able to push anything to the release packages.\r\n\r\nSee https://github.com/falcosecurity/falco/pull/2501\nRemaining steps have been added to PR body.\n/milestone 0.35.0\nAlso, we definitely need more powerful gha self-hosted runners for arm (and more replicas). \r\n\nUh this time build-arm64 is much faster. Don't know what happened :/ \nStill struggling to fix test-arm64 build:", + "This is weird.\r\n\r\n\r\nEDIT:", + "Oh, ok :D \nOk `test-build-arm64` job is now fixed.\nThis is PHENOMENAL @FedeDP ❤️ ❤️ \r\n\r\nI'm looking at the PR and also helping you address the open points.\r\n✅ I have added the `GPG_KEY` variable for the package signing key.\r\nAlso, I already set up ECR with GHA in the falco org so we can do the same here\nI have opened a PR for ECR access as well. 
Once it's merged, the role to assume for ECR push will be" + ] + } + }, + "metadata": { + "tags": [ + "falco", + "graduated", + "security", + "kind-feature", + "release-note", + "dco-signoff--yes", + "lgtm", + "approved", + "size-xxl", + "area-ci" + ], + "category": "security", + "cncfProjects": [ + "falco" + ], + "targetResourceKinds": [ + "Job", + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/falcosecurity/falco/pull/2501", + "sourceRepo": "falcosecurity/falco", + "reactions": 3, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:46:51.716Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/falco/falco-2905-new-falcoctl-driver-loader.json b/solutions/cncf-generated/falco/falco-2905-new-falcoctl-driver-loader.json new file mode 100644 index 00000000..f15c949b --- /dev/null +++ b/solutions/cncf-generated/falco/falco-2905-new-falcoctl-driver-loader.json @@ -0,0 +1,53 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:53.221Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "falco: new: falcoctl driver loader", + "description": "**What type of PR is this?**\n\n/kind feature\n\n**Any specific area of the project related to this PR?**\n\n/area build\n\n**What this PR does / why we need it**:\n\nThis PR drops old falco-driver-loader script in favor of new `falcoctl driver` command.\n\n**Which issue(s) this PR fixes**:\n\nFixes #2675\n\n**Special notes for your reviewer**:\n\nThis is `wip` because falcoctl's PR (https://github.com/falcosecurity/falcoctl/pull/343) is still to be merged and this will need some more work.\nI opened this one to give an idea of the final look.\n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\nnew!: dropped falco-driver-loader script in favor of new falcoctl driver command\n```", + "type": "troubleshoot", + "status": 
"completed", + "resolution": { + "summary": "**What type of PR is this?**\n\n> Uncomment one (or more) `/kind <>` lines:\n\n> /kind bug\n\n> /kind cleanup\n\n> /kind design\n\n> /kind documentation\n\n> /kind failing-test\n\n/kind feature\n\n> /kind release\n\n**Any specific area of the project related to this PR?**\n\n> Uncomment one (or more) `/area <>` lines:\n\n> /area build\n\n> /area engine\n\n> /area tests\n\n> /area proposals\n\n> /area CI\n\n**What this PR does / why we need it**:\n\n**Which issue(s) this PR fixes**:\n\nRefs #2574\n\nThese new environment variables will be recognized when the user attempts the unattended installation of Falco. \nThey'll expand the customization options letting the user choose their preferred driver and the use of falcoctl. In other words, it's an alternative to the dialog input.\n\n**Special notes for your reviewer**:\n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\nnew(scripts): add a way to enforce driver kind and falcoctl enablement when installing Falco from packages and dialog is not present.\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "\r\n\r\n**What type of PR is this?**\r\n\r\n> Uncomment one (or more) `/kind <>` lines:\r\n\r\n> /kind bug\r\n\r\n> /kind cleanup\r\n\r\n> /kind design\r\n\r\n> /kind documentation\r\n\r\n> /kind failing-test\r\n\r\n/kind feature\r\n\r\n> /kind release\r\n\r\n\r\n\r\n**Any specific area of the project related to this PR?**\r\n\r\n> Uncomment one (or more) `/area <>` lines:\r\n\r\n> /area build\r\n\r\n> /area engine\r\n\r\n> /area tests\r\n\r\n> /area proposals\r\n\r\n> /area CI\r\n\r\n\r\n\r\n**What this PR does / why we need it**:\r\n\r\n**Which issue(s) this PR fixes**:\r\n\r\nRefs #2574\r\n\r\n\r\n\r\nThese new environment variables will be recognized when the user attempts the unattended installation of Falco. 
\r\nThey'll expand the customization options letting the user choose their preferred driver and the use of falcoctl. In other words, it's an alternative to the dialog input.\r\n\r\n**Special notes for your reviewer**:\r\n\r\n**Does this PR introduce a user-facing change?**:\r\n\r\n", + "Note: once https://github.com/falcosecurity/falco/pull/2413 gets merged, this should be at least ready for local testing with the CI produced packages.\nbuild-packages is failing with:" + ] + } + }, + "metadata": { + "tags": [ + "falco", + "graduated", + "security", + "kind-feature", + "release-note", + "dco-signoff--yes", + "lgtm", + "approved", + "size-xxl", + "area-build" + ], + "category": "security", + "cncfProjects": [ + "falco" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/falcosecurity/falco/pull/2905", + "sourceRepo": "falcosecurity/falco", + "reactions": 3, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:46:53.221Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/falco/falco-2981-new-engine-add-selective-rule-overrides.json b/solutions/cncf-generated/falco/falco-2981-new-engine-add-selective-rule-overrides.json new file mode 100644 index 00000000..15470858 --- /dev/null +++ b/solutions/cncf-generated/falco/falco-2981-new-engine-add-selective-rule-overrides.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:44.429Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "falco: new(engine): add selective rule overrides", + "description": "**What type of PR is this?**\n\n> Uncomment one (or more) `/kind <>` lines:\n\n/kind feature\n\n**Any specific area of the project related to this PR?**\n\n> Uncomment one (or more) `/area <>` lines:\n\n/area engine\n\n/area tests\n\n**What this PR does / why we need it**:\n\nThis is a rather 
straightforward implementation of https://github.com/falcosecurity/falco/issues/1340#issuecomment-1710135769 . See the comment for examples.\n\nIt is now possible to use the `override` key in rules, lists and macros. Some fields can be overridden: for lists and macros there is only one field that we care about, while for rules, the following fields have been selected:\n* Available for `append`: `{\"condition\", \"output\", \"desc\", \"tags\", \"exceptions\"}`\n* Available for `replace`: `{\"condition\", \"output\", \"desc\", \"priority\", \"tags\", \"exceptions\", \"enabled\", \"warn_evttypes\", \"skip-if-unknown-filter\"}`\n\nAs per the discussion linked above, it is an error to specify both `append: true` and any `override`. Given the syntax it", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This PR may bring feature or behavior changes in the Falco engine and may require the engine version to be bumped.\n\nPlease double check **userspace/engine/falco_engine_version.h** file. 
See [versioning for FALCO_ENGINE_VERSION](https://github.com/falcosecurity/falco/blob/master/RELEASE.md#falco-repo-this-repo).\n\n/hold", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "falco", + "graduated", + "security", + "kind-feature", + "area-engine", + "release-note", + "dco-signoff--yes", + "lgtm", + "approved", + "size-xxl", + "area-tests" + ], + "category": "security", + "cncfProjects": [ + "falco" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/falcosecurity/falco/pull/2981", + "sourceRepo": "falcosecurity/falco", + "reactions": 5, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:46:44.430Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/falco/falco-3307-new-ci-use-zig-compiler-instead-of-relying-on-centos7.json b/solutions/cncf-generated/falco/falco-3307-new-ci-use-zig-compiler-instead-of-relying-on-centos7.json new file mode 100644 index 00000000..c302e2d9 --- /dev/null +++ b/solutions/cncf-generated/falco/falco-3307-new-ci-use-zig-compiler-instead-of-relying-on-centos7.json @@ -0,0 +1,54 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:46.014Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "falco: new(ci): use `zig` compiler instead of relying on centos7.", + "description": "**What type of PR is this?**\n\n/kind cleanup\n\n**Any specific area of the project related to this PR?**\n\n/area CI\n\n**What this PR does / why we need it**:\n\nThis PR drops centos7 from our CI by instead relying on zig compiler to provide glibc 2.17 compatible builds for us.\n\n**Which issue(s) this PR fixes**:\n\nFixes #3270\n\n**Special notes for your reviewer**:\n\nLinked libs PRs:\n* https://github.com/falcosecurity/libs/pull/2034 -> new c-ares 
builds with zig \n* https://github.com/falcosecurity/libs/pull/2036 -> libanl is not present under zig; fixes build\n* https://github.com/falcosecurity/libs/pull/2037 -> title says it all\n* https://github.com/falcosecurity/libs/pull/2043 -> avoids Falco linking issue `undefined symbol: google::protobuf::internal::InternalMetadata::~InternalMetadata()`\n* https://github.com/falcosecurity/libs/pull/2045 -> fixes arm64 `Trace/breakpoint trap` issue for threadinfo and k8saudit plugin opening (source_plugin.c UB)\n* https://github.com/falcosecurity/libs/pull/2049 ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "**What type of PR is this?**\n\n/kind cleanup\n\n**Any specific area of the project related to this PR?**\n\n/area build\n\n**Does this PR require a change in the driver versions?**\n\n**What this PR does / why we need it**:\n\nAlso, now c-ares supports cmake, therefore make use of it.\n\n**Which issue(s) this PR fixes**:\n\nFixes #\n\n**Special notes for your reviewer**:\n\nThis should help resolving a build issue in https://github.com/falcosecurity/falco/pull/3307.\nWhile not fundamental for now, it is a nice to have.\n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\nNONE\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Tue Sep 3 10:46:35 2024: Loaded event sources: syscall\r\nTue Sep 3 10:46:35 2024: Enabled event sources: syscall\r\nTue Sep 3 10:46:35 2024: Opening 'syscall' source with Kernel module\r\nTrace/breakpoint trap", + "**What type of PR is this?**\r\n\r\n/kind cleanup\r\n\r\n**Any specific area of the project related to this PR?**\r\n\r\n/area build\r\n\r\n**Does this PR require a change in the driver versions?**\r\n\r\n**What this PR does / why we need it**:\r\n\r\nAlso, now c-ares supports cmake, therefore make use of it.\r\n\r\n**Which issue(s) this PR fixes**:\r\n\r\n\r\n\r\nFixes 
#\r\n\r\n**Special notes for your reviewer**:\r\n\r\nThis should help resolving a build issue in https://github.com/falcosecurity/falco/pull/3307.\r\nWhile not fundamental for now, it is a nice to have.\r\n\r\n**Does this PR introduce a user-facing change?**:\r\n\r\n", + "[APPROVALNOTIFIER] This PR is **APPROVED**\n\nThis pull-request has been approved by: *FedeDP*\n\nThe full list of commands accepted by this bot can be found [here](https://prow.falco.org/command-help?repo=falcosecurity%2Ffalco).\n\nThe pull request process is described [here](https://git.k8s.io/community/contributors/guide/owners.md#the-code-review-process)\n\n
\nNeeds approval from an approver in each of these files:\n\n- ~~[OWNERS](https://github.com/falcosecurity/falco/blob/master/OWNERS)~~ [FedeDP]\n\nApprovers can indicate their approval by writing `/approve` in a comment\nApprovers can cancel approval by writing `/approve cancel` in a comment\n
\n\n/milestone TBD\nSo, we need to disable the build with shared libelf, because `zig` statically links all specified libraries; see: https://github.com/ziglang/zig/issues/7094\r\nIndeed the CI complains it cannot find system libelf.\r\nSince this was an :hot_pepper: topic for the CNCF, i am going to hold this PR; at the same time, i think we should actually find a way to drop centos:7 while maintaining same glibc requirements.\r\n\r\n\r\nAnyway, even trying with BUNDLED_LIBELF, gives a build error...\r\n\r\n/hold\nNow failing the build on \r\n> ld.lld: error: undefined symbol: arc4random_buf\r\n>>> referenced by ares_rand.c:270\r\n\r\nThat's probably because c-ares does some magic to detect that symbol: https://github.com/c-ares/c-ares/blob/main/CMakeLists.txt#L456; that symbol was added in glibc2.36; most probably it is detecting the symbol even if `zig` is not actually supporting it since we are targeting a build with glibc-2.17.\nI tried to build `c-ares` from scratch (ie: without Falco involved at all) with `zig` and it worked fine with the same `export`s we are doing in the CI.\nUpdate: i saw that updating c-ares to 1.33.1 (latest release) fixed the issue. I am going to bump it in libs (if possible, ie: if grpc does not complain :/ )\r\n\r\nBump PR in libs: https://github.com/falcosecurity/libs/pull/2034\r\n\r\nI now bumped this one to use head of the libs PR with the updated c-ares. 
:crossed_fingers: \nSo, i now bumped this branch to use libs [`chore/zig_build` branch HEAD](https://github.com/falcosecurity/libs/tree/chore/zig_build); that is a branch i pushed upstream with all zig related fixes on it:\r\n* https://github.com/falcosecurity/libs/pull/2034\r\n* https://github.com/falcosecurity/libs/pull/2036\r\n* https://github.com/falcosecurity/libs/pull/2037\r\n* https://github.com/falcosecurity/libs/pull/2043\r\n\r\nOn that branch, i was able to build a full BUNDLED_DEPS version of sinsp-example!\r\n\r\nMoreover, i found out that CMAKE_{C,CXX}_COMPILER truncates `CC` and `CXX` strings at first whitespace, leading with cmake using `zig` as compiler for both, instead of `zig cc` and `zig c++`.\r\nI created 2 small wrapper scripts in CI to workaround this issue:" + ] + } + }, + "metadata": { + "tags": [ + "falco", + "graduated", + "security", + "kind-cleanup", + "release-note", + "dco-signoff--yes", + "lgtm", + "size-m", + "approved", + "area-ci" + ], + "category": "security", + "cncfProjects": [ + "falco" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/falcosecurity/falco/pull/3307", + "sourceRepo": "falcosecurity/falco", + "reactions": 4, + "comments": 23 + }, + "security": { + "scannedAt": "2026-02-27T17:46:46.014Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/falco/falco-776-roadmap-build-slim-and-full-falco-container-images.json b/solutions/cncf-generated/falco/falco-776-roadmap-build-slim-and-full-falco-container-images.json new file mode 100644 index 00000000..422d3c36 --- /dev/null +++ b/solutions/cncf-generated/falco/falco-776-roadmap-build-slim-and-full-falco-container-images.json @@ -0,0 +1,53 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:43.329Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "falco: ROADMAP - Build slim and 
full Falco container images", + "description": "Signed-off-by: Michael Ducy \n\n**What type of PR is this?**\n/kind feature\n\n**Any specific area of the project related to this PR?**\n\n/area deployment\n\n**What this PR does / why we need it**:\nThe current Falco images are quite large (720 MB) and carry lots of build tools that we want to remove from the current image. The build tools are required to compile kernel modules dynamically when the Falco container starts. \n\nUnder the new design we wish to remove the build tools and create two image categories:\n\n- [ ] Init container image for Kernel Module/eBPF probe delivery\n - [ ] Container for building probe dynamically (current model)\n - [x] Container for pulling module via HTTPS\n - [ ] Container for building & packaging custom kernel modules\n - [ ] Container(s) shipping prebuilt modules \n- [ ] Falco container image containing the minimum required software\n - [x] Minimal Image `falcosecurity/falco-minimal` - only required executables and libraries (~19.5mb)\n ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Note that the Falco application images can be tested by using the `falcosecurity/falco-minimal` or `falcosecurity/falco-slim` images. Currently I've only built a falco probe container image for the linuxkit kernel (docker desktop, `falcosecurity/probe-linuxkit-4.9.184`). 
\n\nYou can test by running the following:\n```\ndocker run --rm --privileged falcosecurity/probe-linuxkit-4.9.184\ndocker run --rm -i -t --name falco --privileged -v /var/run/docker.sock:/host/var/run/docker.sock -v /dev:/host/dev -v /proc:/host/proc:ro -v /lib/modules:/host/lib/modules:ro -v /usr:/host/usr:ro falcosecurity/falco-slim:0.17.0\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Note that the Falco application images can be tested by using the `falcosecurity/falco-minimal` or `falcosecurity/falco-slim` images. Currently I've only built a falco probe container image for the linuxkit kernel (docker desktop, `falcosecurity/probe-linuxkit-4.9.184`). \r\n\r\nYou can test by running the following:" + ] + } + }, + "metadata": { + "tags": [ + "falco", + "graduated", + "security", + "kind-feature", + "size-xl", + "release-note", + "dco-signoff--yes", + "lgtm", + "approved" + ], + "category": "security", + "cncfProjects": [ + "falco" + ], + "targetResourceKinds": [ + "Deployment" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/falcosecurity/falco/pull/776", + "sourceRepo": "falcosecurity/falco", + "reactions": 6, + "comments": 17 + }, + "security": { + "scannedAt": "2026-02-27T17:46:43.329Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/fluentd/fluentd-4185-in-tail-use-inode-for-key-of-tailwatcher-when-follow-inodes.json b/solutions/cncf-generated/fluentd/fluentd-4185-in-tail-use-inode-for-key-of-tailwatcher-when-follow-inodes.json new file mode 100644 index 00000000..533ebf06 --- /dev/null +++ b/solutions/cncf-generated/fluentd/fluentd-4185-in-tail-use-inode-for-key-of-tailwatcher-when-follow-inodes.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:57.237Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": 
"auto-generated", + "mission": { + "title": "fluentd: in_tail: Use inode for key of TailWatcher when follow_inodes", + "description": "**Which issue(s) this PR fixes**: \nPartially fixes #3614 (follow_inode true case)\n\n**What this PR does / why we need it**: \nThank you for reading to the great developer's!\nThis PR is fixed for unexpected file close after logs rotate in fluentd v1.16.1.\n\nI found the #3614 's reproducer.\nBefore applying this patch, fluentd cause unexpected file close after logs rotate every hour on my reproducer.\nAfter applying this patch, fluentd does not cause it on my reproducer.\nI have been running testing a long time.\n\n**Docs Changes**:\n\n**Release Note**:", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "**Which issue(s) this PR fixes**: \nFixes #4190 \n\n**What this PR does / why we need it**:\n~~Add validation to make sure detach_watcher is detaching expected watcher. This can avoid unexpectedly detach new watcher created for new log file and lead to log stuck transiently.~~\n\nAdd log to check that detaching inode is the same as the detaching TailWatcher's inode when enabling `follow_inodes`.\n \nNote: If they do not match, canceling the detach (by adding `return`) may prevent an incorrect detach.\nSince #4208 will prevent an incorrect detach, we will only add the warning log in this PR for now.\n\n**Docs Changes**:\nN/A\n\n**Release Note**: \n~~Fix transient log stuck in in_tail when log file rotated and follow_inodes is enabled~~\nSame as the title.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "> Could you fix your name in the Author in your commit message?\r\n\r\nIt's resolved, thanks!\n> Fix #3614\r\n\r\nCould you add `partially` or something similar to this line in your commit message & the first comment of this PR?\r\nAs I described in #3614, it doesn't seem fix the issue on `follow_inode false` 
case.\nIt seems that CI stalls on all platforms.\n> > Fix #3614\r\n> \r\n> Could you add `partially` or something similar to this line in your commit message & the first comment of this PR? As I described in #3614, it doesn't seem fix the issue on `follow_inode false` case.\r\n\r\nThank you for commenting.\r\nKatsuya will handle your comment next week.\r\n\r\nBy the way, please let me explain why we didn't take care of `follow_inode false` case.\r\n\r\nTo fix this unexpected file close issue, we have to modify the tail plugin to create a tail list with hash values **generated by inode numbers**.\r\nBut users who use `follow_inode false` might want the plugin to create a tail list with hash values **generated by file names**.\r\nThat's why we hesitated to take care of `follow_inode false` case in this commit.\r\n\r\nIn our understanding, `follow_inode false` is just for keeping the compatibility with old verions of the tail plugin.\r\nhttps://docs.fluentd.org/input/tail#follow_inodes explains that `follow_inode false` can cause \"read rotated files duplicately\" problem.\r\nSo every user should use `follow_inode true` unless there is some particular reason.\r\n\r\nPlease let me know if our understanding is wrong.\r\nWe can change our code if needed.\n> It seems that CI stalls on all platforms.", + "The cause is `test_should_close_watcher_after_rotate_wait` doesn't follow the in_tail's modification.\r\nIn addition, your patch seems to be missing a fix.", + "In `follow_inodes true` case:" + ] + } + }, + "metadata": { + "tags": [ + "fluentd", + "graduated", + "observability", + "pending" + ], + "category": "observability", + "cncfProjects": [ + "fluentd" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/fluent/fluentd/pull/4185", + "sourceRepo": "fluent/fluentd", + "reactions": 1, + "comments": 71 + }, + "security": { + "scannedAt": "2026-02-27T17:46:57.237Z", + "scannerVersion": "cncf-gen-1.0.0", + 
"sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/fluentd/fluentd-4491-fix-line-skipping-issue-in-receive-lines-method.json b/solutions/cncf-generated/fluentd/fluentd-4491-fix-line-skipping-issue-in-receive-lines-method.json new file mode 100644 index 00000000..d1835624 --- /dev/null +++ b/solutions/cncf-generated/fluentd/fluentd-4491-fix-line-skipping-issue-in-receive-lines-method.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:55.473Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "fluentd: Fix line skipping issue in receive_lines method", + "description": "**Which issue(s) this PR fixes**: \nFixes #4494\n\n**What this PR does / why we need it**: \nBefore this patch, long lines could cause breakdowns in fluentd, potentially posing a vulnerability. With this patch, max_line_size will be integrated into the FIFO, enabling the system to skip lines exceeding the maximum size before executing receive_lines.\n\n**Docs Changes**:\n\n**Release Note**:", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@yugeeklab Thanks for this fix!\nCI is currently unstable because of #4487. We will fix it. Sorry for the trouble.\n\nI see the intent of this fix as follows.\n\n* In the current implementation, large lines that would eventually be discarded in `receive_lines` are temporarily held in `IOHandler`'s `@lines`.\n* This is a waste of memory.\n* This PR resolves the waste. 
\n\nSurely, such a fix would allow us to limit memory consumption by the `max_line_size` setting to some extent!\n\nThis PR would be effective to some extent, however I believe the problem of memory consumption will remain.\nIt would be possible that `FIFO`'s `@buffer` becomes unlimitedly large if the `@eol` does not appear in the data.\n\nAre these my understandings correct?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "short line\\n # To be committed to the pos\r\nvery long line not finished yet # Not to be committed to the pos until the `@eol` occurs.", + "git push -f" + ] + } + }, + "metadata": { + "tags": [ + "fluentd", + "graduated", + "observability" + ], + "category": "observability", + "cncfProjects": [ + "fluentd" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/fluent/fluentd/pull/4491", + "sourceRepo": "fluent/fluentd", + "reactions": 4, + "comments": 21 + }, + "security": { + "scannedAt": "2026-02-27T17:46:55.473Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/fluid/fluid-2649-feat-dataload-support-affinity.json b/solutions/cncf-generated/fluid/fluid-2649-feat-dataload-support-affinity.json new file mode 100644 index 00000000..fc3cc532 --- /dev/null +++ b/solutions/cncf-generated/fluid/fluid-2649-feat-dataload-support-affinity.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:35.564Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "fluid: Feat/dataload support affinity", + "description": "### Ⅰ. 
Describe what this PR does\n\ndataload support affinity,nodeSelector,tolerations,schedulerName\n\nfixes #2594", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/lgtm", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "fluid", + "incubating", + "orchestration", + "lgtm", + "approved" + ], + "category": "troubleshooting", + "cncfProjects": [ + "fluid" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/fluid-cloudnative/fluid/pull/2649", + "sourceRepo": "fluid-cloudnative/fluid", + "reactions": 2, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:43:35.564Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/fluid/fluid-985-add-support-for-dataset-runtime-webhook-pprof.json b/solutions/cncf-generated/fluid/fluid-985-add-support-for-dataset-runtime-webhook-pprof.json new file mode 100644 index 00000000..0e632a6d --- /dev/null +++ b/solutions/cncf-generated/fluid/fluid-985-add-support-for-dataset-runtime-webhook-pprof.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:36.849Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "fluid: Add support for dataset,runtime,webhook pprof", + "description": "### Ⅰ. Describe what this PR does\nadd instant live visualization of statistics for performance analysis\n### Ⅱ. Does this pull request fix one issue?\n\nfixes #780 \n\n### Ⅲ. List the added test cases (unit test/integration test) if any, please explain if no tests are needed.\n\n### Ⅳ. Describe how to verify it\n\n### Ⅴ. Special notes for reviews\nI cannot decide on nodeport number. please give me some advice. 
thanks.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Thanks very much for your great contributions. If you'd like to add documentation, it will be great!", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "fluid", + "incubating", + "orchestration" + ], + "category": "troubleshooting", + "cncfProjects": [ + "fluid" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/fluid-cloudnative/fluid/pull/985", + "sourceRepo": "fluid-cloudnative/fluid", + "reactions": 1, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:43:36.849Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/grpc/grpc-11020-add-typings-to-node-js-grpc-module.json b/solutions/cncf-generated/grpc/grpc-11020-add-typings-to-node-js-grpc-module.json new file mode 100644 index 00000000..f6354d60 --- /dev/null +++ b/solutions/cncf-generated/grpc/grpc-11020-add-typings-to-node-js-grpc-module.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:42.509Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "grpc: Add typings to Node.js grpc module", + "description": "This PR closes #8233 \nIt adds the most basic typings for TypeScript of the public API of `grpc` for Node.js.\n\nAll details are not defined in this, as I'm no expert in reading the native parts of the module. I've used these definitions for a while and trusts the API. Let me know if anything needs to be corrected or otherwise modified.\n\nAll comments etc. 
are from the existing documentations.\n\nExample usage:\n\n```typescript\nimport * as grpc from 'grpc';\n\nconst server = new grpc.Server();\nserver.bind('0.0.0.0:7200', grpc.ServerCredentials.createInsecure());\nserver.start();\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Based on some suggested changes in #11020, I thought it would be a good idea to redefine the constants from C core in JavaScript instead of exposing them through the C extension, so that the documentation can be part of the generated Node documentation and so that they can be cross-referenced within the generated Node documentation.\n\nI know this creates code and documentation duplication between core and Node, but these things are part of my public API, so I have to make changes anyway if they change.\n\nCC @nicolasnoble Do you think this is a reasonable thing to do?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "grpc", + "incubating", + "networking", + "cla--yes" + ], + "category": "networking", + "cncfProjects": [ + "grpc" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/grpc/grpc/pull/11020", + "sourceRepo": "grpc/grpc", + "reactions": 8, + "comments": 48 + }, + "security": { + "scannedAt": "2026-02-27T17:43:42.510Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/grpc/grpc-22032-aio-implement-server-interceptor-for-unary-unary-call.json b/solutions/cncf-generated/grpc/grpc-22032-aio-implement-server-interceptor-for-unary-unary-call.json new file mode 100644 index 00000000..fd310feb --- /dev/null +++ b/solutions/cncf-generated/grpc/grpc-22032-aio-implement-server-interceptor-for-unary-unary-call.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:50.624Z", + "exportedBy": 
"cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "grpc: [Aio] Implement server interceptor for unary unary call", + "description": "Support for interceptors for the unary-unary call at the server-side.\n\nFix #21914\nRef #20482", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@ZHmao also consider that current PR has conflicts that are preventing us to run the whole CI, I would appreciate if you could resolve the conflicts.\n\nThanks!", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "in _filter_server_interceptor: Function _GenericInterceptor.__init__ was called with the wrong arguments [wrong-arg-types]\r\n Expected: (self, fn: Callable[[Callable[[grpc.HandlerCallDetails], Awaitable[grpc.RpcMethodHandler]], grpc.HandlerCallDetails], Awaitable[grpc.RpcMethodHandler]])\r\n Actually passed: (self, fn: Callable[[Callable[[grpc.HandlerCallDetails], Awaitable[grpc.RpcMethodHandler]], grpc.HandlerCallDetails], grpc.RpcMethodHandler])", + "from typing import Callable, Awaitable\r\n\r\n\r\nasync def foo(a: str) -> str:\r\n return a\r\n\r\n\r\nasync def bar(fun: Callable[[str], Awaitable[str]]) -> str:\r\n return await fun('a')\r\n\r\n\r\nasync def go() -> None:\r\n await bar(foo)", + "If you have some free cycle and want to be a contributor of `pytype`, feel free to open an issue or post a PR directly to https://github.com/google/pytype. You can find their unit test here: https://github.com/google/pytype/blob/master/pytype/tests/py3/test_coroutine.py.\r\n\r\nOn the other hand, to move this PR forward, there are two (hacky) ways to solve the `pytype` complain.\r\n1. Use `Union` for the return value: `Union[grpc.RpcMethodHandler, Awaitable[grpc.RpcMethodHandler]]`;\r\n2. 
Use `Any` for the return value.\r\n\r\n@pfreixes Have you encountered this bug before?\nI'll use `Union` to move this PR forward, and open an issue or post a PR on pytype later.\nFYI, when I tried to fix this issue with `Union`, it failed again. Then I wrote another example more like ours.😅" + ] + } + }, + "metadata": { + "tags": [ + "grpc", + "incubating", + "networking", + "kind-enhancement", + "lang-python", + "release-notes--yes" + ], + "category": "networking", + "cncfProjects": [ + "grpc" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/grpc/grpc/pull/22032", + "sourceRepo": "grpc/grpc", + "reactions": 4, + "comments": 20 + }, + "security": { + "scannedAt": "2026-02-27T17:43:50.624Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/grpc/grpc-22062-add-compatibility-for-python-s-eventlet-library.json b/solutions/cncf-generated/grpc/grpc-22062-add-compatibility-for-python-s-eventlet-library.json new file mode 100644 index 00000000..684364c6 --- /dev/null +++ b/solutions/cncf-generated/grpc/grpc-22062-add-compatibility-for-python-s-eventlet-library.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:46.845Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "grpc: Add compatibility for Python's Eventlet library", + "description": "Trying to use the gRPC library in a Python program that uses the\nEventlet library doesn't work as expected, and we end up with deadlocks\nand blocked threads.\n\nThis patch adds a custom eventlet IO manager to provide compatibility\nbetween the Eventlet and gRPC libraries.\n\nThe code includes 2 workarounds for existing eventlet bugs, but this\ncode is compatible with the proposed PRs to solve them, so there should\nbe no problem if/when implements a fix for them:\n\n - https://github.com/eventlet/eventlet/issues/508\n 
- https://github.com/eventlet/eventlet/issues/599\n\nAn extra import and call is necessary for applications to use gRPC with\neventlets. This call must be done after eventlet has monkey patched the\nPython libraries.\n\nThe code would look something like this:\n\n import eventlet\n eventlet.monkey_patch() # noqa\n\n import grpc\n from grpc.experimental import eventlet as grpc_eventlet\n\n if __name__ == '__main__':\n grpc_eventlet.init_eventlet()\n\nResolve #15923\n\n@nicolasn", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "When working on the [eventlet Python I/O manager](https://github.com/grpc/grpc/pull/22062) I found some deficiencies when running Python tests with `run_tests.py`:\n\n- The `--forever` parameter doesn't work as expected, because there are false detection of changed source files because we write coverage and results in watched directories.\n\n- When passing `--runs_per_test` we cannot tell how many runs actually failed, we can only tell how many tests failed, but multiple tests may have failed in a single run.\n\n- Flaky tests in Python Custom I/O managers can only be disabled, we cannot do a single retry for them.\n\n- Passing `--runs_per_tests=1000` with `--use_docker` will result in all tests failing after a while because the container runs out of disk space.\n\nThis PR includes the changes I did to resolve these and be able to evaluate the impact that running the eventlet tests in the CI would have.\n\n@veblush", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "python tools/run_tests/run_tests.py --runs_per_test 1000 --language python --iomgr_platform eventlet" + ] + } + }, + "metadata": { + "tags": [ + "grpc", + "incubating", + "networking", + "kind-enhancement", + "lang-python", + "release-notes--yes", + "kind-experimental-api", + "disposition-stale" + ], + "category": "networking", + "cncfProjects": [ + "grpc" + ], + 
"targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/grpc/grpc/pull/22062", + "sourceRepo": "grpc/grpc", + "reactions": 6, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:43:46.845Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/grpc/grpc-27121-drop-msse4-compiler-flag.json b/solutions/cncf-generated/grpc/grpc-27121-drop-msse4-compiler-flag.json new file mode 100644 index 00000000..78895839 --- /dev/null +++ b/solutions/cncf-generated/grpc/grpc-27121-drop-msse4-compiler-flag.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:44.179Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "grpc: Drop -msse4 compiler flag", + "description": "Older CPUs that do not have SSE4.1 would crash with the Ruby native gem due to an illegal instruction exception.\n\nThe Abseil random library isn't being used at the moment (https://github.com/grpc/grpc/pull/26476), and there's no reason gRPC needs to force SSE4.1 instructions on all platforms at the moment. 
There are other hardware-specific issues that need to be ironed out for this to work: https://github.com/grpc/grpc/pull/26479\n\nWhen the `-msse4` compiler flag was enabled, the Abseil code started using the `pinsrb` instruction:\n\n```\n$ elfx86exts abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.o\nMODE64 (ret)\nCMOV (cmovne)\nSSE2 (movdqa)\nSSE41 (pinsrb)\nSSE1 (movaps)\nCPU Generation: Penryn\n```\n\nThis was previously needed because gcc 4.8 wouldn't compile without the `-msse4` and `-maes` flags.\n\nHowever, per https://github.com/gcc-mirror/gcc/commit/97db2bf7fb10e7eb2e8224e0471b56976f133843 gcc 5.0+ automatically detects whether these options are enabled.\n\nclang still needs `-maes` sin", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "`immintrin.h` is the de-factor standard header for clang and GCC to include Intel intrinsics. Using this header avoids requiring the compiler to use the `-maes` and `-msse4.1` compiler options on systems that may not have AES or SSE instruction support.\n\nclang: As seen in https://github.com/llvm-mirror/clang/blob/master/lib/Headers/immintrin.h,\nspecific intrinsic header files are conditionally included depending on whether the feature is available.\n\ngcc: As seen in https://github.com/gcc-mirror/gcc/blob/master/gcc/config/i386/immintrin.h, gcc includes all intrinsic header files, but each individual file guards against the feature not being available.\n\nThis came out of an investigation in https://github.com/grpc/grpc/pull/27121.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "$ elfx86exts abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.o\r\nMODE64 (ret)\r\nCMOV (cmovne)\r\nSSE2 (movdqa)\r\nSSE41 (pinsrb)\r\nSSE1 (movaps)\r\nCPU Generation: Penryn", + "This sounds like `-maes` **might** be necessary, but `-msse4` could be dropped. 
What do you think?\nI'm not sure what is defining `ABSL_ARCH_X86_64` because it's not being defined in my build. I added some garbage in the `elif` block, and gRPC compiled fine.\nOh, it looks like abseil defines this: https://github.com/abseil/abseil-cpp/blob/997aaf3a28308eba1b9156aa35ab7bca9688e9f6/absl/random/internal/platform.h#L63\r\n\nIt looks gcc 5 and up don't need these compiler flags because they are only enabled when specific flags are in use (https://github.com/gcc-mirror/gcc/commit/97db2bf7fb10e7eb2e8224e0471b56976f133843). For example:", + "`emmintrin.h` does this as well:" + ] + } + }, + "metadata": { + "tags": [ + "grpc", + "incubating", + "networking", + "lang-core", + "area-build", + "release-notes--no", + "imported" + ], + "category": "networking", + "cncfProjects": [ + "grpc" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/grpc/grpc/pull/27121", + "sourceRepo": "grpc/grpc", + "reactions": 8, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:43:44.179Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/grpc/grpc-27660-make-the-gem-build-on-truffleruby.json b/solutions/cncf-generated/grpc/grpc-27660-make-the-gem-build-on-truffleruby.json new file mode 100644 index 00000000..aaa9b114 --- /dev/null +++ b/solutions/cncf-generated/grpc/grpc-27660-make-the-gem-build-on-truffleruby.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:48.108Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "grpc: Make the gem build on TruffleRuby", + "description": "This set of changes makes it possible to install the `grpc` gem on TruffleRuby.\nTruffleRuby as part of GraalVM ships with its own LLVM toolchain and as such some changes are needed to make it build.\nThis fixes #23069.\nThis is important as several TruffleRuby users 
including Shopify cannot install the grpc gem out of the box without this fix.\nReplaces #24632.\n\n@markdroth", + "type": "analyze", + "status": "completed", + "resolution": { + "summary": "LGTM. Could you rebase it please? @eregon", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "$ chruby 3.0.3\r\n$ rake gem\r\n$ chruby truffleruby-dev\r\n$ gem i ./pkg/grpc-1.48.0.dev.gem\r\n$ ruby -v -e 'require \"grpc\"; p GRPC'\r\ntruffleruby 22.2.0-dev-8839057f, like ruby 3.0.3, GraalVM CE Native [x86_64-linux]\r\nGRPC" + ] + } + }, + "metadata": { + "tags": [ + "grpc", + "incubating", + "networking", + "lang-ruby", + "release-notes--yes", + "imported", + "bloat-none", + "perf-change-none", + "per-call-memory-neutral" + ], + "category": "networking", + "cncfProjects": [ + "grpc" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/grpc/grpc/pull/27660", + "sourceRepo": "grpc/grpc", + "reactions": 5, + "comments": 22 + }, + "security": { + "scannedAt": "2026-02-27T17:43:48.108Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/grpc/grpc-28092-support-musllinux-binary-wheels-on-x64-and-x86.json b/solutions/cncf-generated/grpc/grpc-28092-support-musllinux-binary-wheels-on-x64-and-x86.json new file mode 100644 index 00000000..e3d7188a --- /dev/null +++ b/solutions/cncf-generated/grpc/grpc-28092-support-musllinux-binary-wheels-on-x64-and-x86.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:51.879Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "grpc: Support musllinux binary wheels on x64 and x86", + "description": "PyPA's wheel standard has welcomed musl-libc based binary wheels. 
This PR adds support to build and test musllinux binary wheels for gRPC Python, in x64/x86.\n\nAArch64 might need to wait for: 1. we have more accessible ARM compute resources; 2. Dockcross add \"musllinux\" (https://github.com/dockcross/dockcross) (or we build an image with cross-compiling CPython from source).\n\nFixes https://github.com/grpc/grpc/issues/27940 https://github.com/grpc/grpc/issues/26620 https://github.com/grpc/grpc/issues/25036\nRelated https://github.com/grpc/grpc/issues/27512 \n\nHere are some implementation details about this PR:\n\n- `mktemp` in Alpine works differently than Debian-based distros. It demands `XXXX` in the template of the temporary file/folder's name to always be suffix.\n- We used to rename \"grpcio-tools\" to \"grpc.tools\", which is already deprecated (see [guide](https://grpc.io/docs/languages/python/quickstart/#generate-grpc-code)). The correct import name is \"grpc_tools\" (...why it isn't grpcio-", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "How frequently do we think these tests will catch something on presubmit that's not caught by other things?\nAnother way of asking: does this need to be a presubmit, or could it run on master only?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "grpc", + "incubating", + "networking", + "lang-python", + "area-build", + "release-notes--yes", + "imported", + "bloat-none", + "perf-change-none" + ], + "category": "networking", + "cncfProjects": [ + "grpc" + ], + "targetResourceKinds": [ + "Job" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/grpc/grpc/pull/28092", + "sourceRepo": "grpc/grpc", + "reactions": 3, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:43:51.879Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git 
a/solutions/cncf-generated/grpc/grpc-29000-support-pre-built-binaries-for-ruby-3-1.json b/solutions/cncf-generated/grpc/grpc-29000-support-pre-built-binaries-for-ruby-3-1.json new file mode 100644 index 00000000..7441f832 --- /dev/null +++ b/solutions/cncf-generated/grpc/grpc-29000-support-pre-built-binaries-for-ruby-3-1.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:40.952Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "grpc: Support pre-built binaries for Ruby 3.1", + "description": "## Why?\nFixes #28627\n\n## What are the changes?\nI added 3.1.1 as a RUBY_CC_VERSION. I had to upgrade rake-compiler-dock to 1.2.0 as this is the only version of this gem that supports compilation for ruby 3.1+", + "type": "analyze", + "status": "completed", + "resolution": { + "summary": "Currently having problems with ruby 3.1 incompatibility of google-protobuf", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "% chruby 3.1.0\r\n\r\n% ruby -v\r\nruby 3.1.0p0 (2021-12-25 revision fb4df44d16) [x86_64-linux]\r\n\r\n% curl -O https://packages.grpc.io/archive/2022/04/5e989cf78d160fbe7279abd3168eda0627fa0ba2-f5497f9a-07f8-46a2-8bcd-37ce22fd08c1/ruby/grpc-1.46.0.dev-x86_64-linux.gem\r\n\r\n% gem install grpc-1.46.0.dev-x86_64-linux.gem\r\nFetching google-protobuf-3.20.0-x86_64-linux.gem\r\nFetching googleapis-common-protos-types-1.3.1.gem\r\nSuccessfully installed google-protobuf-3.20.0-x86_64-linux\r\nSuccessfully installed googleapis-common-protos-types-1.3.1\r\nSuccessfully installed grpc-1.46.0.dev-x86_64-linux\r\nParsing documentation for google-protobuf-3.20.0-x86_64-linux\r\nunable to convert \"\\xDB\" from ASCII-8BIT to UTF-8 for lib/google/protobuf/descriptor_pb.rb, skipping\r\nInstalling ri documentation for google-protobuf-3.20.0-x86_64-linux\r\nParsing documentation for 
googleapis-common-protos-types-1.3.1\r\nInstalling ri documentation for googleapis-common-protos-types-1.3.1\r\nParsing documentation for grpc-1.46.0.dev-x86_64-linux\r\nInstalling ri documentation for grpc-1.46.0.dev-x86_64-linux\r\nDone installing documentation for google-protobuf, googleapis-common-protos-types, grpc after 0 seconds\r\n3 gems installed\r\n\r\n% gem list grpc\r\n\r\n*** LOCAL GEMS ***\r\n\r\ngrpc (1.46.0.dev x86_64-linux)\r\n\r\n% irb\r\n>> require 'grpc'\r\n:85:in `require': cannot load such file -- grpc/grpc_c (LoadError)\r\n from :85:in `require' \r\n from ~/.gem/ruby/3.1.0/gems/grpc-1.46.0.dev-x86_64-linux/src/ruby/lib/grpc/grpc.rb:22:in `' \r\n from ~/.gem/ruby/3.1.0/gems/grpc-1.46.0.dev-x86_64-linux/src/ruby/lib/grpc.rb:19:in `require_relative' \r\n from ~/.gem/ruby/3.1.0/gems/grpc-1.46.0.dev-x86_64-linux/src/ruby/lib/grpc.rb:19:in `' \r\n from :160:in `require' \r\n from :160:in `rescue in require' \r\n from :149:in `require'\r\n\tfrom (irb):1:in `
'\r\n\tfrom ~/.rubies/ruby-3.1.0/lib/ruby/gems/3.1.0/gems/irb-1.4.1/exe/irb:11:in `'\r\n\tfrom ~/.rubies/ruby-3.1.0/bin/irb:25:in `load'\r\n\tfrom ~/.rubies/ruby-3.1.0/bin/irb:25:in `
'\r\n:85:in `require': cannot load such file -- grpc (LoadError)\r\n\tfrom :85:in `require'\r\n\tfrom (irb):1:in `
'\r\n\tfrom ~/.rubies/ruby-3.1.0/lib/ruby/gems/3.1.0/gems/irb-1.4.1/exe/irb:11:in `'\r\n\tfrom ~/.rubies/ruby-3.1.0/bin/irb:25:in `load'\r\n\tfrom ~/.rubies/ruby-3.1.0/bin/irb:25:in `
'", + "% chruby 3.1.1\r\n\r\n% ruby -v\r\nruby 3.1.1p18 (2022-02-18 revision 53f5fc4236) [x86_64-linux]\r\n\r\n% curl -O https://packages.grpc.io/archive/2022/04/5e989cf78d160fbe7279abd3168eda0627fa0ba2-f5497f9a-07f8-46a2-8bcd-37ce22fd08c1/ruby/grpc-1.46.0.dev-x86_64-linux.gem\r\n\r\n% gem install grpc-1.46.0.dev-x86_64-linux.gem\r\nFetching google-protobuf-3.20.0-x86_64-linux.gem\r\nFetching googleapis-common-protos-types-1.3.1.gem\r\nSuccessfully installed google-protobuf-3.20.0-x86_64-linux\r\nSuccessfully installed googleapis-common-protos-types-1.3.1\r\nSuccessfully installed grpc-1.46.0.dev-x86_64-linux\r\nParsing documentation for google-protobuf-3.20.0-x86_64-linux\r\nunable to convert \"\\xDB\" from ASCII-8BIT to UTF-8 for lib/google/protobuf/descriptor_pb.rb, skipping\r\nInstalling ri documentation for google-protobuf-3.20.0-x86_64-linux\r\nParsing documentation for googleapis-common-protos-types-1.3.1\r\nInstalling ri documentation for googleapis-common-protos-types-1.3.1\r\nParsing documentation for grpc-1.46.0.dev-x86_64-linux\r\nInstalling ri documentation for grpc-1.46.0.dev-x86_64-linux\r\nDone installing documentation for google-protobuf, googleapis-common-protos-types, grpc after 0 seconds\r\n3 gems installed\r\n\r\n% gem list grpc\r\n\r\n*** LOCAL GEMS ***\r\n\r\ngrpc (1.46.0.dev x86_64-linux)\r\n\r\n% irb\r\n>> require 'grpc'\r\n:85:in `require': cannot load such file -- grpc/grpc_c (LoadError)\r\n from :85:in `require' \r\n from ~/.gem/ruby/3.1.1/gems/grpc-1.46.0.dev-x86_64-linux/src/ruby/lib/grpc/grpc.rb:22:in `' \r\n from ~/.gem/ruby/3.1.1/gems/grpc-1.46.0.dev-x86_64-linux/src/ruby/lib/grpc.rb:19:in `require_relative' \r\n from ~/.gem/ruby/3.1.1/gems/grpc-1.46.0.dev-x86_64-linux/src/ruby/lib/grpc.rb:19:in `' \r\n from :160:in `require' \r\n from :160:in `rescue in require' \r\n from :149:in `require'\r\n\tfrom (irb):1:in `
'\r\n\tfrom ~/.rubies/ruby-3.1.1/lib/ruby/gems/3.1.0/gems/irb-1.4.1/exe/irb:11:in `'\r\n\tfrom ~/.rubies/ruby-3.1.1/bin/irb:25:in `load'\r\n\tfrom ~/.rubies/ruby-3.1.1/bin/irb:25:in `
'\r\n:85:in `require': cannot load such file -- grpc (LoadError)\r\n\tfrom :85:in `require'\r\n\tfrom (irb):1:in `
'\r\n\tfrom ~/.rubies/ruby-3.1.1/lib/ruby/gems/3.1.0/gems/irb-1.4.1/exe/irb:11:in `'\r\n\tfrom ~/.rubies/ruby-3.1.1/bin/irb:25:in `load'\r\n\tfrom ~/.rubies/ruby-3.1.1/bin/irb:25:in `
'", + "% chruby 3.1.2\r\n\r\n% ruby -v\r\nruby 3.1.2p20 (2022-04-12 revision 4491bb740a) [x86_64-linux]\r\n\r\n% curl -O https://packages.grpc.io/archive/2022/04/5e989cf78d160fbe7279abd3168eda0627fa0ba2-f5497f9a-07f8-46a2-8bcd-37ce22fd08c1/ruby/grpc-1.46.0.dev-x86_64-linux.gem\r\n\r\n% gem install grpc-1.46.0.dev-x86_64-linux.gem\r\nFetching googleapis-common-protos-types-1.3.1.gem\r\nFetching google-protobuf-3.20.0-x86_64-linux.gem\r\nSuccessfully installed google-protobuf-3.20.0-x86_64-linux\r\nSuccessfully installed googleapis-common-protos-types-1.3.1\r\nSuccessfully installed grpc-1.46.0.dev-x86_64-linux\r\nParsing documentation for google-protobuf-3.20.0-x86_64-linux\r\nunable to convert \"\\xDB\" from ASCII-8BIT to UTF-8 for lib/google/protobuf/descriptor_pb.rb, skipping\r\nInstalling ri documentation for google-protobuf-3.20.0-x86_64-linux\r\nParsing documentation for googleapis-common-protos-types-1.3.1\r\nInstalling ri documentation for googleapis-common-protos-types-1.3.1\r\nParsing documentation for grpc-1.46.0.dev-x86_64-linux\r\nInstalling ri documentation for grpc-1.46.0.dev-x86_64-linux\r\nDone installing documentation for google-protobuf, googleapis-common-protos-types, grpc after 0 seconds\r\n3 gems installed\r\n\r\n% gem list grpc\r\n\r\n*** LOCAL GEMS ***\r\n\r\ngrpc (1.46.0.dev x86_64-linux)\r\n\r\n% irb\r\n>> require 'grpc'\r\n:85:in `require': cannot load such file -- grpc/grpc_c (LoadError) \r\n from :85:in `require' \r\n from ~/.gem/ruby/3.1.2/gems/grpc-1.46.0.dev-x86_64-linux/src/ruby/lib/grpc/grpc.rb:22:in `' \r\n from ~/.gem/ruby/3.1.2/gems/grpc-1.46.0.dev-x86_64-linux/src/ruby/lib/grpc.rb:19:in `require_relative' \r\n from ~/.gem/ruby/3.1.2/gems/grpc-1.46.0.dev-x86_64-linux/src/ruby/lib/grpc.rb:19:in `' \r\n from :160:in `require' \r\n from :160:in `rescue in require' \r\n from :149:in `require'\r\n\tfrom (irb):1:in `
'\r\n\tfrom /opt/rubies/ruby-3.1.2/lib/ruby/gems/3.1.0/gems/irb-1.4.1/exe/irb:11:in `'\r\n\tfrom /opt/rubies/ruby-3.1.2/bin/irb:25:in `load'\r\n\tfrom /opt/rubies/ruby-3.1.2/bin/irb:25:in `
'\r\n:85:in `require': cannot load such file -- grpc (LoadError)\r\n\tfrom :85:in `require'\r\n\tfrom (irb):1:in `
'\r\n\tfrom /opt/rubies/ruby-3.1.2/lib/ruby/gems/3.1.0/gems/irb-1.4.1/exe/irb:11:in `'\r\n\tfrom /opt/rubies/ruby-3.1.2/bin/irb:25:in `load'\r\n\tfrom /opt/rubies/ruby-3.1.2/bin/irb:25:in `
'" + ] + } + }, + "metadata": { + "tags": [ + "grpc", + "incubating", + "networking", + "lang-ruby", + "release-notes--yes", + "imported", + "bloat-none", + "per-call-memory-neutral" + ], + "category": "networking", + "cncfProjects": [ + "grpc" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/grpc/grpc/pull/29000", + "sourceRepo": "grpc/grpc", + "reactions": 11, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:43:40.952Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/grpc/grpc-29857-set-correct-platform-tag-in-wheels-on-mac-os-with-python-3-10.json b/solutions/cncf-generated/grpc/grpc-29857-set-correct-platform-tag-in-wheels-on-mac-os-with-python-3-10.json new file mode 100644 index 00000000..ae90da18 --- /dev/null +++ b/solutions/cncf-generated/grpc/grpc-29857-set-correct-platform-tag-in-wheels-on-mac-os-with-python-3-10.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:39.509Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "grpc: Set Correct Platform Tag in Wheels on Mac OS with Python 3.10", + "description": "Intended to fix https://github.com/grpc/grpc/issues/28387\n\nAfter much experimentation (seriously, look at the commit history), I found that the \"ARCHFLAGS\" approach just wasn't working. Instead, I simply rename the artifact. This has been tested to work on an M1 mac running with Rosetta.\n\nThis is a _hack_. Long term, we'll need to figure out why `distutils` is not generating the proper platform and fix that. We'll also want to generate both `arm64` artifacts and a proper `universal2` artifact containing both x64 and arm64 artifacts. 
However, this will unbreak M1 users in the short term.\n\nSide note: This PR also updates the Python 3.10 patch version installed on MacOS.", + "type": "analyze", + "status": "completed", + "resolution": { + "summary": "Backport https://github.com/grpc/grpc/pull/29857 to 1.47.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "grpc", + "incubating", + "networking", + "lang-python", + "priority-p0-release-blocker", + "platform-macos", + "release-notes--yes", + "imported", + "bloat-none", + "per-call-memory-neutral" + ], + "category": "networking", + "cncfProjects": [ + "grpc" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/grpc/grpc/pull/29857", + "sourceRepo": "grpc/grpc", + "reactions": 14, + "comments": 2 + }, + "security": { + "scannedAt": "2026-02-27T17:43:39.509Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/grpc/grpc-36096-python-handle-blockingioerror-if-multiple-loops-are-bound-to-pollerco.json b/solutions/cncf-generated/grpc/grpc-36096-python-handle-blockingioerror-if-multiple-loops-are-bound-to-pollerco.json new file mode 100644 index 00000000..fdf1cbf9 --- /dev/null +++ b/solutions/cncf-generated/grpc/grpc-36096-python-handle-blockingioerror-if-multiple-loops-are-bound-to-pollerco.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:49.598Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "grpc: [Python] Handle BlockingIOError if multiple loops are bound to PollerCompletionQueue", + "description": "Fixes #25364\n\nWhen using grpc with multiple asyncio event loops in different threads, we may have multiple loops bound to the same `PollerCompletionQueue`. 
If the event loops support file descriptor monitoring, each loop will be configured with `loop.add_reader` to handle read events on the notification socket. This means that when completion events are available, `PollerCompletionQueue._handle_events` may be called multiple times for the different loops, but only one call will actually receive the byte written to the notification socket. The other calls will raise `BlockingIOError`, which then bubbles up and gets printed by the event loop's default exception handler, resulting in spurious error messages since things are in fact working fine. This changes `_handle_events` to catch and supress these `BlockingIOError`s.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "When using grpc with asyncio from multiple threads, spurious `PollerCompletionQueue` errors are printed due to multiple event loops listening a socket to be notified of grpc events. More than one event loop may be woken when the completion queue writes a byte to the notification socket, but only one of the loops receives the data and the others raise a `BlockingIOError`. This doesn't actually cause a problem in the grpc still works with asyncio in multiple threads, but lots of spurious error messages are printed by the default exception handler. (See https://github.com/grpc/grpc/issues/25364 for discussion of the issue.)\n\nThis configures the asyncio event loop used for grpc with an exception handler that ignores these `BlockingIOError`s from the `PollerCompletionQueue` so that we don't spam the logs with scary-looking messages when using grpc with asyncio from multiple threads.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "in a python 3.12 virtualenv, but that fails with an error while building wheels. Any additional pointers you have about running tests locally would be greatly appreciated.\nHi, thanks for contributing the test! 
Do you mind share more information about the failures? In the meanwhile, you can also trigger the test using `bazel` like this:" + ] + } + }, + "metadata": { + "tags": [ + "grpc", + "incubating", + "networking", + "lang-python", + "kokoro-run", + "release-notes--no" + ], + "category": "networking", + "cncfProjects": [ + "grpc" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/grpc/grpc/pull/36096", + "sourceRepo": "grpc/grpc", + "reactions": 5, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:43:49.598Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/grpc/grpc-8601-node-electron-build.json b/solutions/cncf-generated/grpc/grpc-8601-node-electron-build.json new file mode 100644 index 00000000..9d495524 --- /dev/null +++ b/solutions/cncf-generated/grpc/grpc-8601-node-electron-build.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:45.310Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "grpc: Node electron build", + "description": "This allows the Node library to be built for Electron by following [the existing procedure on the Electron website](http://electron.atom.io/docs/tutorial/using-native-node-modules/). It also adds scripts for building artifacts for Electron and running the tests on Electron. This fixes #6138 and maybe #8166.\n\nElectron does not work with the libuv endpoint implementation, so we will continue to need the non-uv implementation. So, I also refactored the uv/non-uv split in the Node extension code.\n\nThis is built on #8588, so that should be merged first", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "When you say \"locally\", I assume you mean that you cloned the git repository. Did you update the submodules? 
(`git submodule update --init`)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "../src/boringssl/err_data.c:17:10: fatal error: 'openssl/base.h' file not found\n#include ", + "npm run electron-build -- --target=1.4.6", + "./tools/run_tests/build_node_electron.sh 1.4.6" + ] + } + }, + "metadata": { + "tags": [ + "grpc", + "incubating", + "networking", + "lang-node", + "cla--yes" + ], + "category": "networking", + "cncfProjects": [ + "grpc" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/grpc/grpc/pull/8601", + "sourceRepo": "grpc/grpc", + "reactions": 7, + "comments": 33 + }, + "security": { + "scannedAt": "2026-02-27T17:43:45.310Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/hami/hami-1250-feat-add-informer-based-pod-cache-to-reduce-api-server-load.json b/solutions/cncf-generated/hami/hami-1250-feat-add-informer-based-pod-cache-to-reduce-api-server-load.json new file mode 100644 index 00000000..85f04809 --- /dev/null +++ b/solutions/cncf-generated/hami/hami-1250-feat-add-informer-based-pod-cache-to-reduce-api-server-load.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:26.241Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "hami: feat: add informer-based pod cache to reduce API server load", + "description": "**What type of PR is this?**\n\n/kind feature\n\n**What this PR does / why we need it**:\n\nThis PR adds an informer-based pod cache mechanism to the vGPUmonitor component to significantly reduce Kubernetes API server load. The key improvements include:\n\n1. Implemented a local pod cache using Kubernetes Informers that only watches pods on the current node\n2. 
Added event handlers to maintain cache consistency on pod lifecycle events (add/update/delete)\n3. Optimized container list refresh with time-based throttling (default 30s interval)\n4. Added configurable resync interval via the `HAMI_RESYNC_INTERVAL` environment variable\n5. Improved container cleanup logic using cached pod information\n\n**Real-world problem scenario**: When the cluster has more than 50 nodes, the number of vGPUmonitor instances equals the number of nodes, creating excessive pod list query pressure on the kube-apiserver. In our testing, API server CPU usage significantly increased in large clusters. This optimization can re", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "> Bot detected the issue body's language is not English, translate it automatically. 👯👭🏻🧑‍🤝‍🧑👫🧑🏿‍🤝‍🧑🏻👩🏾‍🤝‍👨🏿👬🏿 \n---- \ncc @archlitchi", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "hami", + "sandbox", + "app-definition", + "kind-enhancement", + "kind-feature", + "lgtm", + "dco-signoff--yes", + "size-l" + ], + "category": "workloads", + "cncfProjects": [ + "hami" + ], + "targetResourceKinds": [ + "Pod", + "Node" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/Project-HAMi/HAMi/pull/1250", + "sourceRepo": "Project-HAMi/HAMi", + "reactions": 1, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:48:26.241Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/hami/hami-1271-fix-release-dangling-node-lock.json b/solutions/cncf-generated/hami/hami-1271-fix-release-dangling-node-lock.json new file mode 100644 index 00000000..4c67582c --- /dev/null +++ b/solutions/cncf-generated/hami/hami-1271-fix-release-dangling-node-lock.json @@ -0,0 +1,55 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:27.409Z", + "exportedBy": 
"cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "hami: fix: release dangling node lock", + "description": "**What type of PR is this?**\n\n/kind bug\n\n**Which issue(s) this PR fixes**:\nFixes #714 \nFixes #810 \nFixes #1244 \n\n## **Background**\n\nThe background details of the above issues and the history of the fixes are summarised here:\n\nThe root of the problem originates from the poor design of the device-plugin framework provided by k8s. In the device-plugin framework, the device-plugin can report the extended resources on the node through the ListAndWatch interface, and handle the initialization of the device through the Allocate interface, as well as mounting the necessary contents for the container to run, including the device file, the control device file, the driver directory, the command line tools, the environment variables, etc. Kubelet will call the Allocate interface of the device-plugin after the Pod is bound to the node. After the Pod is bound to a node, Kubelet calls the Allocate interface of the device-plugin to set up and mount the response for the container based on the returned ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/gemini review", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Check the UUID of GPUs on the node:", + "Then deploy using Depolyment and limit the scheduling scope to the corresponding node with use-uuid:" + ] + } + }, + "metadata": { + "tags": [ + "hami", + "sandbox", + "app-definition", + "kind-bug", + "lgtm", + "dco-signoff--yes", + "approved", + "size-m" + ], + "category": "workloads", + "cncfProjects": [ + "hami" + ], + "targetResourceKinds": [ + "Pod", + "Deployment", + "Node" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/Project-HAMi/HAMi/pull/1271", + "sourceRepo": "Project-HAMi/HAMi", + "reactions": 0, + "comments": 11 + }, 
+ "security": { + "scannedAt": "2026-02-27T17:48:27.410Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/harbor/harbor-14905-allows-using-robot-accounts-for-image-replication.json b/solutions/cncf-generated/harbor/harbor-14905-allows-using-robot-accounts-for-image-replication.json new file mode 100644 index 00000000..eba2be0f --- /dev/null +++ b/solutions/cncf-generated/harbor/harbor-14905-allows-using-robot-accounts-for-image-replication.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:05.067Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "harbor: Allows using robot accounts for Image replication", + "description": "This patch (more of a bug fix) will allow using robot accounts for image replication. \n\nCurrently, it is not possible to create a robot account that can replicate images from one Harbor registry to another. (You need a System Admin account, big no go in corp environments)\n\nThe patch only changes `getProject` to call the API directly, instead of filtering the list of `getProjects` because `getProjects` needs admin level permission and hence is not possible to be used with robot accounts. 
\n\n## Side Effects\n\nThe change has no or littel side effects: \n![image](https://user-images.githubusercontent.com/1492007/122196489-92033780-ce97-11eb-9171-9b6471b8343f.png)\n\nits only called by `listProjects` and is always providing and expect a single result, \n\nThis patch only works for Harbor => 2.2.0 and resolves a bunch of open issues.\n\nresolves #14640, resolves #13384, resolves #13795\nrelated #8723\n\n## How to creaet a Robot Accounts for Replication\n\nIn order to a replication with robot accounts, a r", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@reasonerjt can you check that please?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Signed-off-by: Vadim Bauer \n\n# [Codecov](https://codecov.io/gh/goharbor/harbor/pull/14905?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=goharbor) Report\n> Merging [#14905](https://codecov.io/gh/goharbor/harbor/pull/14905?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=goharbor) (6aa517c) into [master](https://codecov.io/gh/goharbor/harbor/commit/c4f4e6e7e140c128aa0ff95fe8d2ac9e07ca75d5?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=goharbor) (c4f4e6e) will **increase** coverage by `0.01%`.\n> The diff coverage is `75.00%`.\n\n[![Impacted file tree graph](https://codecov.io/gh/goharbor/harbor/pull/14905/graphs/tree.svg?width=650&height=150&src=pr&token=6SOPrJGDVW&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=goharbor)](https://codecov.io/gh/goharbor/harbor/pull/14905?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=goharbor)" + ] + } + }, + "metadata": { + "tags": [ + "harbor", + "graduated", + "security" + ], + "category": 
"security", + "cncfProjects": [ + "harbor" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/goharbor/harbor/pull/14905", + "sourceRepo": "goharbor/harbor", + "reactions": 12, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:47:05.067Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/harbor/harbor-16190-upgrade-distribution-to-2-8.json b/solutions/cncf-generated/harbor/harbor-16190-upgrade-distribution-to-2-8.json new file mode 100644 index 00000000..6746510d --- /dev/null +++ b/solutions/cncf-generated/harbor/harbor-16190-upgrade-distribution-to-2-8.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:02.833Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "harbor: Upgrade distribution to 2.8", + "description": "## what\n\nUpgrade the [distribution package to 2.8](https://github.com/distribution/distribution/releases/tag/v2.8.0)\n\n## why\n\nHarbor is currently on 2.7.1 which was released in 2019. Full release notes available here https://github.com/distribution/distribution/releases/tag/v2.8.0\n\n## EDIT: IRSA support not included in this release\n\nSee https://github.com/distribution/distribution/pull/3552#issuecomment-1018761583\n\n~The 2.7 release uses an out of date AWS SDK which does not support the AWS AssumeRoleWithWebIdentity required for deployments under Kubernetes that use IRSA. This was fixed in the 2.8 release, see https://github.com/distribution/distribution/issues/3097. Support for IRSA is of growing importance. It is the recommended solution when running Kubernetes on AWS~\n\n~Unfortunately while distribution has a 2.8 branch they have not made a release. 
I pinned the upgrade to the last commit of the 2.8 release branch~\n\n~IRSA Documentation:~\n- ~Overview https://aws.amazon.com/blogs/openso", + "type": "upgrade", + "status": "completed", + "resolution": { + "summary": "This PR fixes an issue if you use S3 as a storage backend, where you cannot push layers larger than 10GB in size.\n\nThe registry fails with the following message:\n```\nharbor-registry-7fd5cd687b-xz4kc registry time=\"2022-02-01T10:11:34.289971246Z\" level=error msg=\"upload resumed at wrong offest: 5242880000 != 7615513624\" auth.user.name=\"harbor_registry_user\" go.version=go1.15.6 http.request.host=registry.example.org http.request.id= http.request.method=PUT http.request.remoteaddr= http.request.uri=\"/v2/\" http.request.useragent=\"docker/19.03.5 go/go1.12.12 git-commit/2ee0c57608 os/windows arch/amd64 UpstreamClient(Docker-Client/19.03.5 \\(windows\\))\" vars.name=\"\" vars.uuid=\n```\n\nThis problem was fixed upstream in the distribution/distribution repository through https://github.com/distribution/distribution/pull/2815 but never included in a release which is why I am adding it as a patch to the registry version that harbor is building.\n\nIf you want me to adapt https://github.com/goharbor/harbor/blob/main/Makefile#L111 as well please tell me.\n\nSigned-off-by: Franz Nemeth \n\nCloses #15719", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "harbor-registry-7fd5cd687b-xz4kc registry time=\"2022-02-01T10:11:34.289971246Z\" level=error msg=\"upload resumed at wrong offest: 5242880000 != 7615513624\" auth.user.name=\"harbor_registry_user\" go.version=go1.15.6 http.request.host=registry.example.org http.request.id= http.request.method=PUT http.request.remoteaddr= http.request.uri=\"/v2/\" http.request.useragent=\"docker/19.03.5 go/go1.12.12 git-commit/2ee0c57608 os/windows arch/amd64 UpstreamClient(Docker-Client/19.03.5 \\(windows\\))\" vars.name=\"\" 
vars.uuid=", + "github.com/docker/distribution v2.7.2-0.20211123191640-3b7b53456922+incompatible", + "# pins to github.com/docker/distribution v2.7.2-0.20211123191640-3b7b53456922+incompatible\r\ngo get github.com/docker/distribution@3b7b534569220c840993aad03e3eafe54b923f4d" + ] + } + }, + "metadata": { + "tags": [ + "harbor", + "graduated", + "security" + ], + "category": "security", + "cncfProjects": [ + "harbor" + ], + "targetResourceKinds": [ + "Deployment", + "Service", + "Role" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/goharbor/harbor/pull/16190", + "sourceRepo": "goharbor/harbor", + "reactions": 24, + "comments": 7 + }, + "security": { + "scannedAt": "2026-02-27T17:47:02.833Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/harbor/harbor-16796-add-date-time-format-setting-in-portal.json b/solutions/cncf-generated/harbor/harbor-16796-add-date-time-format-setting-in-portal.json new file mode 100644 index 00000000..e959593f --- /dev/null +++ b/solutions/cncf-generated/harbor/harbor-16796-add-date-time-format-setting-in-portal.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:06.296Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "harbor: Add date/time format setting in portal", + "description": "Currently, the format used for rendering dates and times is derived from the language/locale selected by the user. The formats used in the en-US locale (\"English\" in Harbor's GUI) are ambiguous and hard to understand for many users.\n\nFor example, is 10/11/21 the 10th of November, 2021, the 11th of October, 2021, or even something else like the 21nd of November, 2010? 
Even if one does know how to interpret it in theory, such dates are essentially enciphered and must be mentally deciphered by the user every time, incurring unnecessary cognitive load.\n\nSimilarly, many users are used to the 24-hour clock rather than the 12-hour clock (AM/PM), and so on.\n\nThis PR adds a dropdown next to the existing language selector that lets the user choose between the default format for the current locale and the internationally standardized, unambiguous ISO 8601 format. For example, when viewing a list of resources, the ISO 8601 option makes points in time display as\n\n> 2021-10-11, 13:37\n\ninstead of\n\n> ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "# [Codecov](https://codecov.io/gh/goharbor/harbor/pull/16796?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=goharbor) Report\n> Merging [#16796](https://codecov.io/gh/goharbor/harbor/pull/16796?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=goharbor) (8855b9d) into [main](https://codecov.io/gh/goharbor/harbor/commit/db45155365d66059db768a6b1278f820b9dc4317?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=goharbor) (db45155) will **decrease** coverage by `4.41%`.\n> The diff coverage is `45.71%`.\n\n[![Impacted file tree graph](https://codecov.io/gh/goharbor/harbor/pull/16796/graphs/tree.svg?width=650&height=150&src=pr&token=6SOPrJGDVW&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=goharbor)](https://codecov.io/gh/goharbor/harbor/pull/16796?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=goharbor)\n\n```diff\n@@ Coverage Diff @@\n## main #16796 +/- ##\n==========================================\n- Coverage 71.62% 67.20% -4.42% \n==========================================\n Files 736 966 
+230 \n Lines 67491 80167 +12676 \n Branches 0 2550 +2550 \n==========================================\n+ Hits 48342 5387", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "harbor", + "graduated", + "security", + "release-note-enhancement", + "release-note-docs" + ], + "category": "security", + "cncfProjects": [ + "harbor" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/goharbor/harbor/pull/16796", + "sourceRepo": "goharbor/harbor", + "reactions": 12, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:47:06.296Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/harbor/harbor-17932-aws-auth-use-default-creds-when-none-are-supplied.json b/solutions/cncf-generated/harbor/harbor-17932-aws-auth-use-default-creds-when-none-are-supplied.json new file mode 100644 index 00000000..cb540245 --- /dev/null +++ b/solutions/cncf-generated/harbor/harbor-17932-aws-auth-use-default-creds-when-none-are-supplied.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:09.094Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "harbor: aws auth use default creds when none are supplied", + "description": "# Comprehensive Summary of your change\n\nHarbor should not assume that if AWS Credentials are not defined that the user wants to use `ec2rolecreds`. Rather, it should allow the AWS SDK to resolve credentials by passing `nil` Credentials:\n\nhttps://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials\n\nThis _should_ be backwards compatible because the defualt provider chain does look for: `4. 
If your application is running on an Amazon EC2 instance, IAM role for Amazon EC2.`\n\n# Issue being fixed\nFixes #15007\n\nRelated to #17962\n\nPlease indicate you've done the following:\n- [x] Well Written Title and Summary of the PR\n- [x] Label the PR as needed. \"release-note/ignore-for-release, release-note/new-feature, release-note/update, release-note/enhancement, release-note/community, release-note/breaking-change, release-note/docs, release-note/infra, release-note/deprecation\"\n- [x] Accepted the DCO. Commits without the DCO will delay acceptance.\n- [ ] Made ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Will close and re-open the PR so we can try to unblock GH actions!", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "$ export AWS_PROFILE=my-profile-name\r\n$ export AWS_SDK_LOAD_CONFIG=true\r\n$ ./run.sh https://123456.dkr.ecr.us-east-2.amazonaws.com\r\n2022-12-08T13:21:24-05:00 [INFO] [/pkg/reg/adapter/native/adapter.go:36]: the factory for adapter docker-registry registered\r\n2022-12-08T13:21:24-05:00 [INFO] [/pkg/reg/adapter/awsecr/adapter.go:44]: the factory for adapter aws-ecr registered\r\n2022-12-08T13:21:30-05:00 [FATAL] [/awssecrettest/awssecrettest.go:38]: EC2RoleRequestError: no EC2 instance role found\r\ncaused by: RequestError: send request failed\r\ncaused by: Get \"http://169.254.169.254/latest/meta-data/iam/security-credentials/\": dial tcp 169.254.169.254:80: connect: no route to host", + "$ export AWS_PROFILE=my-profile-name\r\n$ export AWS_SDK_LOAD_CONFIG=true\r\n$ ./run.sh https://123456.dkr.ecr.us-east-2.amazonaws.com\r\n2022-12-08T13:18:08-05:00 [INFO] [/pkg/reg/adapter/native/adapter.go:36]: the factory for adapter docker-registry registered\r\n2022-12-08T13:18:08-05:00 [INFO] [/pkg/reg/adapter/awsecr/adapter.go:44]: the factory for adapter aws-ecr registered\r\n", + "$ ./run.sh 
https://123456.dkr.ecr.us-east-2.amazonaws.com \"bad\" \"bad\"\r\n2022-12-08T13:32:40-05:00 [INFO] [/pkg/reg/adapter/native/adapter.go:36]: the factory for adapter docker-registry registered\r\n2022-12-08T13:32:40-05:00 [INFO] [/pkg/reg/adapter/awsecr/adapter.go:44]: the factory for adapter aws-ecr registered\r\n2022-12-08T13:32:41-05:00 [FATAL] [/awssecrettest/awssecrettest.go:38]: UnrecognizedClientException: The security token included in the request is invalid.\r\n status code: 400, request id: 7c3a3f19-95c8-4cdd-9b39-a1cb07edab2b\r\nexit status 1" + ] + } + }, + "metadata": { + "tags": [ + "harbor", + "graduated", + "security", + "release-note-enhancement", + "target-2-8-0" + ], + "category": "security", + "cncfProjects": [ + "harbor" + ], + "targetResourceKinds": [ + "Role" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/goharbor/harbor/pull/17932", + "sourceRepo": "goharbor/harbor", + "reactions": 3, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:47:09.094Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/harbor/harbor-18105-fix-buildkit-cache-export-to-harbor.json b/solutions/cncf-generated/harbor/harbor-18105-fix-buildkit-cache-export-to-harbor.json new file mode 100644 index 00000000..7e798516 --- /dev/null +++ b/solutions/cncf-generated/harbor/harbor-18105-fix-buildkit-cache-export-to-harbor.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:01.345Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "harbor: Fix buildkit cache export to harbor.", + "description": "Thx to @XciD\n\nThis fixes goharbor/harbor#14774\n\nThank you for contributing to Harbor!\n\n# Comprehensive Summary of your change\n\n# Issue being fixed\nFixes #14774\n\nPlease indicate you've done the following:\n- [x] Well Written Title and Summary of the PR\n- [x] Label the PR as 
needed. \"release-note/ignore-for-release, release-note/new-feature, release-note/update, release-note/enhancement, release-note/community, release-note/breaking-change, release-note/docs, release-note/infra, release-note/deprecation\"\n- [x] Accepted the DCO. Commits without the DCO will delay acceptance.\n- [x] Made sure tests are passing and test coverage is added if needed.\n- [x] Considered the docs impact and opened a new docs issue or PR with docs changes if needed in [website repository](https://github.com/goharbor/website).", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Fixes the issue where the caching didn't work for building dev and production container. According to the log, it's due to 404 when GHA tries to push docker image manifest to harbor: https://github.com/PelicanPlatform/pelican/actions/runs/7975774289/job/21774758514\n\nThe fix was found here: https://github.com/goharbor/harbor/pull/18105#issuecomment-1811951274\n\nAlso adding debug to buildx for more verbose log.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "corpusops/harbor-jobservice:v2.6.1\r\ncorpusops/harbor-core:v2.6.1\r\ncorpusops/harbor-portal:v2.6.1\r\ncorpusops/harbor-registryctl:v2.6.1\r\ncorpusops/harbor-db:v2.6.1\r\ncorpusops/registry-photon:v2.6.1", + "| [Flag](https://app.codecov.io/gh/goharbor/harbor/pull/18105/flags?src=pr&el=flags&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=goharbor) | Coverage Δ | |\n|---|---|---|\n| [unittests](https://app.codecov.io/gh/goharbor/harbor/pull/18105/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=goharbor) | `67.39% <57.14%> (-0.01%)` | :arrow_down: |\n\nFlags with carried forward coverage won't be shown. 
[Click here](https://docs.codecov.io/docs/carryforward-flags?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=goharbor#carryforward-flags-in-the-pull-request-comment) to find out more.\n\n| [Files](https://app.codecov.io/gh/goharbor/harbor/pull/18105?dropdown=coverage&src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=goharbor) | Coverage Δ | |\n|---|---|---|\n| [src/controller/artifact/abstractor.go](https://app.codecov.io/gh/goharbor/harbor/pull/18105?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=goharbor#diff-c3JjL2NvbnRyb2xsZXIvYXJ0aWZhY3QvYWJzdHJhY3Rvci5nbw==) | `65.21% <57.14%> (-0.84%)` | :arrow_down: |\n\n... and [9 files with indirect coverage changes](https://app.codecov.io/gh/goharbor/harbor/pull/18105/indirect-changes?src=pr&el=tree-more&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=goharbor)\n\n
\nIs the holdup the failing code coverage checks? If so I'd be willing to try and update the tests so they cover the new condition added.\nFWIW I have had success pushing the cache to Harbor by setting the new `image-manifest=true` key that was added in https://github.com/moby/buildkit/pull/3724\nThis PR is being marked stale due to a period of inactivty. If this PR is still relevant, please comment or remove the stale label. Otherwise, this PR will close in 30 days.\nnot stale\n@kiorky Why is it needed to hack the media types in the code? (If buildkit were to change e.g. \"application/vnd.buildkit.cacheconfig.v0\" to \"application/vnd.buildkit.cacheconfig.v1\", it would stop working.)\nThis PR is being marked stale due to a period of inactivty. If this PR is still relevant, please comment or remove the stale label. Otherwise, this PR will close in 30 days.\nNot stale.\nWe're currently running into this exact same issue also.\r\nWould also like to see this merged so that we can use buildkit cache using harbor.\nI'm still waiting on this to merge as well and would be happy to help if I can make that happen.\r\n\r\nIn case this helps anyone else, here are the steps I'm taking outside of harbor to get around this for now:\r\n\r\n1. Create a buildx builder using the latest `moby/buildkit:master`\r\n- This is required because, last time I tried, the default would not let me use the `image-manifest` arg required for the fix in the next step\r\n2. When specify the `cache-to` argument set `image-manifest=true`\r\n- Example `docker-bake.hcl`:", + "You can add this arg similarly to a command line call just as well. 
Hope this helps.\nusing Buildkit version 0.12 or higher will indeed work with the steps as mentioned by @bnjmn.\nI'm not quite understanding what's stopping you from using harbor caching already?\r\nFor an example of GitHub actions workflow:" + ] + } + }, + "metadata": { + "tags": [ + "harbor", + "graduated", + "security", + "stale", + "release-note-update" + ], + "category": "security", + "cncfProjects": [ + "harbor" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/goharbor/harbor/pull/18105", + "sourceRepo": "goharbor/harbor", + "reactions": 25, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:47:01.345Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/harbor/harbor-18686-feat-registry-support-assumerolewithwebidentity-for-s3.json b/solutions/cncf-generated/harbor/harbor-18686-feat-registry-support-assumerolewithwebidentity-for-s3.json new file mode 100644 index 00000000..29053ff7 --- /dev/null +++ b/solutions/cncf-generated/harbor/harbor-18686-feat-registry-support-assumerolewithwebidentity-for-s3.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:04.028Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "harbor: feat(registry): support AssumeRoleWithWebIdentity for S3", + "description": "# Comprehensive Summary of your change\n\nThis PR adds patches to the building of the Harbor registry image that update the AWS SDK and fix how the AWS S3 client is created so that it can use `AssumeRoleWithWebIdentity` through IRSA.\n\nThe upstream PRs used in the patches are https://github.com/distribution/distribution/pull/3921 that change the way the client is created and https://github.com/distribution/distribution/pull/3599 that updates the AWS SDK. 
I have built the image that results from this PR and verified I can push new images to Harbor while using IRSA. For testing the image `davidspek/registry-photon:v2.8.0-irsa-0.1.4` can be used.\n\n# Issue being fixed\nFixes #16490 and #12888\n\nPlease indicate you've done the following:\n- [x] Well Written Title and Summary of the PR\n- [x] Label the PR as needed. \"**release-note/new-feature**\"\n- [x] Accepted the DCO. Commits without the DCO will delay acceptance.\n- [x] Made sure tests are passing and test coverage is added if needed.\n- [x] Consi", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@DavidSpek can you demo the setup and configuration in the next community meeting, we can use recording for documentation.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "harbor", + "graduated", + "security" + ], + "category": "security", + "cncfProjects": [ + "harbor" + ], + "targetResourceKinds": [ + "Role" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/goharbor/harbor/pull/18686", + "sourceRepo": "goharbor/harbor", + "reactions": 18, + "comments": 29 + }, + "security": { + "scannedAt": "2026-02-27T17:47:04.028Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/harbor/harbor-21347-feat-single-active-replication.json b/solutions/cncf-generated/harbor/harbor-21347-feat-single-active-replication.json new file mode 100644 index 00000000..81e19fb4 --- /dev/null +++ b/solutions/cncf-generated/harbor/harbor-21347-feat-single-active-replication.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:07.945Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "harbor: feat: Single Active Replication", + "description": "### Single Active Replication per 
replication policy\n## Proposal: https://github.com/goharbor/community/pull/256\n## Summary\nThis PR addresses a long-standing issue where overlapping replications of the same policies can occur in Harbor, leading to unnecessary resource consumption and poor performance. By introducing a \"Single Active Replication\" checkbox in the replication policy, it ensures that replication tasks for the same policy do not run if there is already a replication running for the same policy, preventing bandwidth overload and queue backups, especially for large artifacts.\n\n## Similar Issues\n- https://github.com/goharbor/harbor/issues/19937\n- https://github.com/goharbor/harbor/issues/20532\n- https://github.com/goharbor/harbor/issues/17269\n- https://github.com/goharbor/harbor/issues/16656\n\n## Related Issues\n- https://github.com/goharbor/harbor/issues/15415\n- https://github.com/goharbor/harbor/issues/7842\n\n# Why do we need this\n\n1. Users have for long requesting this feature", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## Proposal for Single Active Replication feature.\n\n### Discussion & PR: https://github.com/goharbor/harbor/pull/21347", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "harbor", + "graduated", + "security", + "area-replication", + "needs-proposal", + "release-note-new-feature", + "never-stale", + "needs-design", + "target-fc", + "target-2-14-0" + ], + "category": "security", + "cncfProjects": [ + "harbor" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/goharbor/harbor/pull/21347", + "sourceRepo": "goharbor/harbor", + "reactions": 4, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:47:07.945Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git 
a/solutions/cncf-generated/headlamp/headlamp-1921-frontend-add-new-table-component-and-use-it-in-resourcetable.json b/solutions/cncf-generated/headlamp/headlamp-1921-frontend-add-new-table-component-and-use-it-in-resourcetable.json new file mode 100644 index 00000000..f3b24398 --- /dev/null +++ b/solutions/cncf-generated/headlamp/headlamp-1921-frontend-add-new-table-component-and-use-it-in-resourcetable.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:32.274Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "headlamp: frontend: Add new Table component and use it in ResourceTable", + "description": "We want more features in our tables. Stuff like sorting, filtering, selection, actions, etc. This is where the new `Table` component comes in and `SimpleTable` will remain simple.\n\nmaterial-react-table (MRT) was chosen because it uses the same ui framework, looks good, has all the feature we will need, MIT licensed, based on headless tanstack table and very customizable. \n\n`Table` component is mostly a wrapper around MRT with some sensible defaults and additional app specific behaviour (like storing page state in url). I kept most of the props as aliases to the MRT props so it can be extensible without introducing any plumbing. \n\nIn the scope of this PR I also updated ResourceTable component to use the new table. The only change from the 'outside' is that each column needs to provide getter and renderer functions, first is needed for filtering and sorting, second one is for displaying. Getter needs to return a string and since most of the existing column getters were simple functions t", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "**Description:**\n---\nAs mentioned in #955, this pull request addresses the need to filter CRDs by categories. 
\nHere the features implemented:\n\n- **List of Categories**: Added the ability to list all available categories.\n- **Category Selection**: Implemented the functionality to select and unselect categories.\n- **Filtering**: Enabled filtering CRDs based on selected categories.\n\n**Implementation Details:**\n---\n- Introduced a new component, `CRDCategoriesList`, to facilitate category selection and filtering.\n- Utilized Redux for state management, specifically the `filterSlice`, to store and update the selected categories.\n- Integrated with existing CRD functionality to fetch and display categories dynamically.\n\n**Changes Made:**\n---\n- Added the `CRDCategoriesList` component to handle category selection and filtering.\n- Updated Redux state management to accommodate the new category filter.\n- Enhanced UI to display available categories and allow user interaction.\n\n**Feedback Request:**\n---\n- Any suggestions for optimizing loading time for the `CRDCategoriesList` component.\n- Review of Redux state management implementation and usage.\n- Feedback on UI/UX improvements or any potential issues.\n\n**Known Issues:**\n---\n- Currently experiencing loading time issues with the `CRDCategoriesList` component. 
Gather categories when initially loading the CRD may resolve the issue.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "headlamp", + "sandbox", + "observability" + ], + "category": "observability", + "cncfProjects": [ + "headlamp" + ], + "targetResourceKinds": [ + "Pod", + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubernetes-sigs/headlamp/pull/1921", + "sourceRepo": "kubernetes-sigs/headlamp", + "reactions": 4, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:48:32.274Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/headlamp/headlamp-2231-frontend-introduce-react-query-and-v2-api.json b/solutions/cncf-generated/headlamp/headlamp-2231-frontend-introduce-react-query-and-v2-api.json new file mode 100644 index 00000000..7518ddc7 --- /dev/null +++ b/solutions/cncf-generated/headlamp/headlamp-2231-frontend-introduce-react-query-and-v2-api.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:40.048Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "headlamp: frontend: Introduce react-query and v2 API", + "description": "fixes #1700 \n\nThis PR introduces react-query as the library to perform and coordinate requests to the backend.\nIt brings nice quality of life improvements like caching, deduplicating requests, error handling, convenient react hooks.\n\nFor KubeObject classes, new methods were added: useQuery (alternative to useGet), useListQuery (alternative to useList). The old methods are left as-is (marked as deprecated) for compatibility with plugins. \n\nSome requests were not converted to limit the scope of this PR. Things like health and config requests for example, where caching might introduce problems, are left to do in next PRs. 
\n\n## Testing done\n\n- [x] Manually checked the application\n- [x] Ensured that the apiProxy exports stay the same after refactor (all the exported functions/types are present and unchanged)\n- [x] Manually checked plugins/examples\n- [x] Manually checked plugins repo plugins\n- [x] Manually checked app compiled with `make app-linux`\n- [x] Checked headlamp-plugin page reloaded", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Very nice work. I'm looking forward to these improvements.\n\nI don't have time for an in depth review right now... But my general questions of all PRs: are there docs, tests for new code, atomic commits? Can anything be broken out into separate PRs? How does it affect plugins? Adopting a new technology... were alternatives evaluated and is the decision reversable/or leaking to users? ( I guess @reduxjs/toolkit/query is the alternative here ). Is there something broken/deprecated and how is that going to be communicated?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "index-B1wInVTN.js:830 Error: Error: Unreachable\r\n at backendFetch (index-B1wInVTN.js:830:27146)\r\n at async clusterFetch (index-B1wInVTN.js:830:27624)\r\n at async Promise.any (/tmp/.mount_Headlae2jo9U/resources/frontend/index 1)", + "TypeError: Cannot read properties of undefined (reading 'pluralName')\r\n at xe (index-B1wInVTN.js:1128:19944)\r\n at index-B1wInVTN.js:1128:20388\r\n at Array.map ()\r\n at Overview (index-B1wInVTN.js:1128:20235)\r\n at um (vendor-mui-BWZhF6T-.js:38:17018)\r\n at nx (vendor-mui-BWZhF6T-.js:40:44058)\r\n at Jb (vendor-mui-BWZhF6T-.js:40:39790)\r\n at YR (vendor-mui-BWZhF6T-.js:40:39718)\r\n at Hc (vendor-mui-BWZhF6T-.js:40:39570)\r\n at Zp (vendor-mui-BWZhF6T-.js:40:35934)\r\njp @ vendor-mui-BWZhF6T-.js:40", + "> TypeError: Cannot read properties of undefined (reading 'pluralName')\r\n> at xe 
(index-B1wInVTN.js:1128:19944)\r\n> at index-B1wInVTN.js:1128:20388\r\n> at Array.map ()\r\n> at Overview (index-B1wInVTN.js:1128:20235)\r\n> at um (vendor-mui-BWZhF6T-.js:38:17018)\r\n> at nx (vendor-mui-BWZhF6T-.js:40:44058)\r\n> at Jb (vendor-mui-BWZhF6T-.js:40:39790)\r\n> at YR (vendor-mui-BWZhF6T-.js:40:39718)\r\n> at Hc (vendor-mui-BWZhF6T-.js:40:39570)\r\n> at Zp (vendor-mui-BWZhF6T-.js:40:35934)\r\n> jp @ vendor-mui-BWZhF6T-.js:40\r\n>" + ] + } + }, + "metadata": { + "tags": [ + "headlamp", + "sandbox", + "observability" + ], + "category": "observability", + "cncfProjects": [ + "headlamp" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubernetes-sigs/headlamp/pull/2231", + "sourceRepo": "kubernetes-sigs/headlamp", + "reactions": 2, + "comments": 29 + }, + "security": { + "scannedAt": "2026-02-27T17:48:40.048Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/headlamp/headlamp-3024-ci-push-helm-chart-to-ghcr.json b/solutions/cncf-generated/headlamp/headlamp-3024-ci-push-helm-chart-to-ghcr.json new file mode 100644 index 00000000..13ce8818 --- /dev/null +++ b/solutions/cncf-generated/headlamp/headlamp-3024-ci-push-helm-chart-to-ghcr.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:34.079Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "headlamp: ci: push Helm Chart to GHCR", + "description": "- Bumped deps used in workflow\n- Push chart to GHCR\n- Fixes https://github.com/headlamp-k8s/headlamp/issues/2987", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "This was pointed out in https://github.com/kubernetes-sigs/headlamp/pull/3024/files#r2318193831\n\n## Summary\n\n```\nthis fails because it tries to push provenance 
files\n\nhttps://github.com/kubernetes-sigs/headlamp/actions/runs/17425865523/job/49472773512\n\nError: file '.cr-release-packages/headlamp-0.35.0.tgz.prov' does not appear to be a gzipped archive; got 'text/plain; charset=utf-8'\n```\n\n## Related Issue\n\nFixes #2987", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "this fails because it tries to push provenance files\r\n\r\nhttps://github.com/kubernetes-sigs/headlamp/actions/runs/17425865523/job/49472773512\r\n\r\nError: file '.cr-release-packages/headlamp-0.35.0.tgz.prov' does not appear to be a gzipped archive; got 'text/plain; charset=utf-8'" + ] + } + }, + "metadata": { + "tags": [ + "headlamp", + "sandbox", + "observability", + "kind-feature", + "charts", + "size-s", + "cncf-cla--yes" + ], + "category": "observability", + "cncfProjects": [ + "headlamp" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubernetes-sigs/headlamp/pull/3024", + "sourceRepo": "kubernetes-sigs/headlamp", + "reactions": 4, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:48:34.080Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/headlamp/headlamp-3239-i18n-added-tamil-language.json b/solutions/cncf-generated/headlamp/headlamp-3239-i18n-added-tamil-language.json new file mode 100644 index 00000000..4fdee55b --- /dev/null +++ b/solutions/cncf-generated/headlamp/headlamp-3239-i18n-added-tamil-language.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:36.273Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "headlamp: i18n: Added tamil language", + "description": "I have added tamil language for the headlamp website which will help people who knows tamil to efficiently interact with the site.\n\nThis resolves 
issue no:- #3223 \n\nAttached Screenshot for reference :-\n\n![Tamil](https://github.com/user-attachments/assets/51e6c2c7-da45-417c-aabf-f118ef8162f8)", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "\"CLA

The committers listed above are authorized under a signed CLA.
  • :white_check_mark: login: HarshSrivastava275 (8a16dd56df7ce280b4e958333b7893d5e68aa0ef, fe9baf60fd3c63978971b15240244ddbc16497d0, 7e6501ac08c3eb8863b4b9ef39d3f60130a5eeff, 3128057c296cb2148e041dc9133c432223abd288, 35f2991cefd5facd099995f911192b7d7ccc0246, c6359ad60047f998af2f1be1094b730fd893709d, 04e50750df891fe996e0951e1e993da7db959b19)
", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "+ \"Use evict for pod deletion\": \"\",\r\n+ \"Evict\": \"\",\r\n+ \"Evict Pod\": \"\",\r\n+ \"Are you sure you want to evict pod {{ itemName }}?\": \"\",\r\n+ \"Force Delete\": \"\"," + ] + } + }, + "metadata": { + "tags": [ + "headlamp", + "sandbox", + "observability", + "kind-feature", + "a11y", + "i18n", + "cncf-cla--yes", + "needs-rebase", + "size-xs" + ], + "category": "observability", + "cncfProjects": [ + "headlamp" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubernetes-sigs/headlamp/pull/3239", + "sourceRepo": "kubernetes-sigs/headlamp", + "reactions": 3, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:48:36.273Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/headlamp/headlamp-3664-frontend-resourcetable-add-sort-column-and-direction-per-view-memo.json b/solutions/cncf-generated/headlamp/headlamp-3664-frontend-resourcetable-add-sort-column-and-direction-per-view-memo.json new file mode 100644 index 00000000..26cdcf0d --- /dev/null +++ b/solutions/cncf-generated/headlamp/headlamp-3664-frontend-resourcetable-add-sort-column-and-direction-per-view-memo.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:37.315Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "headlamp: frontend: ResourceTable: Add sort column and direction per view memory", + "description": "## Summary\n\nThis PR ensure that each table remembers its last sorting column and direction across navigation, with support for multi-column sorting.\n\nFixes #3639 \n\n## Changes Made to ResourceTable.tsx\nAdded localStorage functions for sorting persistence:\n\nAdded storeSortingSettings function to save sorting state to 
localStorage\nAdded loadSortingSettings function to retrieve saved sorting state from localStorage\nAdded sorting state management:\n\nAdded sorting state with useState that initializes from localStorage on component mount\nState tracks current sorting configuration for the table\nUpdated sort initialization logic:\n\nModified sorting logic to prioritize current state, then persisted settings, then default fallback\nAdded sorting dependency to the useMemo array for proper re-rendering\nAdded sorting change handler and state integration:\n\nCreated onSortingChange function to persist sorting changes to localStorage\nAdded sorting to Table component state prop\nAdded onSortingChange to Tabl", + "type": "analyze", + "status": "completed", + "resolution": { + "summary": "\"CLA

The committers listed above are authorized under a signed CLA.
  • :white_check_mark: login: krrish-sehgal / name: Krrish Sehgal (05de512f1e03f46c9e961c72cfc405d42868bde3)
  • :white_check_mark: login: illume / name: René Dudfield (e466620b952d4482bf948e88139ad32a26a8672a)
", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "headlamp", + "sandbox", + "observability", + "kind-feature", + "frontend", + "a11y", + "table", + "cncf-cla--yes", + "size-xl", + "approved" + ], + "category": "observability", + "cncfProjects": [ + "headlamp" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/kubernetes-sigs/headlamp/pull/3664", + "sourceRepo": "kubernetes-sigs/headlamp", + "reactions": 3, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:48:37.315Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/headlamp/headlamp-3692-backend-headlamp-add-oidc-pkce-support.json b/solutions/cncf-generated/headlamp/headlamp-3692-backend-headlamp-add-oidc-pkce-support.json new file mode 100644 index 00000000..ef193753 --- /dev/null +++ b/solutions/cncf-generated/headlamp/headlamp-3692-backend-headlamp-add-oidc-pkce-support.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:29.954Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "headlamp: backend: headlamp: Add OIDC PKCE support", + "description": "## Summary\n\nAdd PKCE support for OIDC authentication flow\n\n## Related Issue\n\nFixes #3137\n\n## Changes\n\n- Added PKCE configuration: Newflag (default: false) to enable/disable PKCE\n- Updated Config struct: Added OidcUsePKCE field to support PKCE configuration\n- Implemented PKCE cryptographic functions\n- Enhanced OauthConfig struct: Added field to store PKCE verifier", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "\"CLA

The committers listed above are authorized under a signed CLA.
  • :white_check_mark: login: k-airos / name: k-airos (5f57e7fb32a1f07dec4e1d82eaddc2d68b183393, 9d37906abf5992f37262050518168c345e130e6f)
", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Testing Helm chart templates against expected output...\r\nError: parse error at (headlamp/templates/deployment.yaml:227): undefined variable \"$usePKCE\"\r\n\r\nUse --debug flag to render out invalid YAML" + ] + } + }, + "metadata": { + "tags": [ + "headlamp", + "sandbox", + "observability", + "kind-feature", + "backend", + "security", + "oidc", + "cncf-cla--yes", + "approved", + "size-l" + ], + "category": "observability", + "cncfProjects": [ + "headlamp" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/kubernetes-sigs/headlamp/pull/3692", + "sourceRepo": "kubernetes-sigs/headlamp", + "reactions": 11, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:48:29.954Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/headlamp/headlamp-4434-frontend-display-a8r-io-service-metadata-in-service-views.json b/solutions/cncf-generated/headlamp/headlamp-4434-frontend-display-a8r-io-service-metadata-in-service-views.json new file mode 100644 index 00000000..35b98bfe --- /dev/null +++ b/solutions/cncf-generated/headlamp/headlamp-4434-frontend-display-a8r-io-service-metadata-in-service-views.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:35.223Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "headlamp: frontend: Display a8r.io service metadata in service views", + "description": "## Summary\nAdds UI support for a8r.io service discovery annotations in Headlamp.\n\n## Related Issue\nFixes #4377\n\n## Changes\n- Added a \"Service Information\" section to the Service details view to display a8r.io annotations\n- Displayed text-based annotations such as owner, description, and dependencies\n- Rendered link-based 
annotations (logs, documentation, repository, chat) as actionable links\n- Added an optional Owner column in the Service list view\n- Added row actions for logs, documentation, repository, and chat when valid URLs are present\n\n## Steps to Test\n1. Create or use a Service with a8r.io annotations (e.g. owner, logs, repository).\n2. Open the Service details page and verify the \"Service Information\" section is rendered correctly.\n3. Open the Services list view and verify the Owner column is populated when annotations exist.\n4. Open the row actions menu and verify links appear only for valid URLs.\n\n## Screenshots\n**Service list view with optional Owner column populated via a8r.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@mastermaxx03 Thank you so much for this excellent PR!\nI've just finished reading through the code.\n\nI'd like to share a few concerns I noticed:\nThere's a discrepancy in the number of icons shown between Details and List views, and the processing logic appears to be implemented as completely separate systems.\nHowever, upon carefully reviewing the code, I believe these two implementations are likely performing very similar operations, and I don't think there's a need to have a difference in the number of buttons displayed.\n\nRather than duplicating the entire processing logic again in the List view, would you consider refactoring the A8RServiceInfo side to organize the processing, and then leveraging that shared logic for both Details and List views?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "> frontend: App: service: Add a8r.io service metadata support and icons\r\n>" + ] + } + }, + "metadata": { + "tags": [ + "headlamp", + "sandbox", + "observability", + "cncf-cla--yes", + "approved", + "size-xxl" + ], + "category": "observability", + "cncfProjects": [ + "headlamp" + ], + "targetResourceKinds": [ + 
"Service" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubernetes-sigs/headlamp/pull/4434", + "sourceRepo": "kubernetes-sigs/headlamp", + "reactions": 3, + "comments": 25 + }, + "security": { + "scannedAt": "2026-02-27T17:48:35.223Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/headlamp/headlamp-4458-backend-allow-configuring-log-level-via-environment-variable.json b/solutions/cncf-generated/headlamp/headlamp-4458-backend-allow-configuring-log-level-via-environment-variable.json new file mode 100644 index 00000000..b409b20b --- /dev/null +++ b/solutions/cncf-generated/headlamp/headlamp-4458-backend-allow-configuring-log-level-via-environment-variable.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:38.383Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "headlamp: backend: Allow configuring log level via environment variable", + "description": "## Summary\n\nThis PR adds the ability to configure the Headlamp backend log level via the `HEADLAMP_CONFIG_LOG_LEVEL` environment variable. \n\n## Related Issue\n\nFixes #4139 \n\n## Changes\n\n- Added a backend configuration flag `--log-level` with environment variable support via `HEADLAMP_CONFIG_LOG_LEVEL`.\n- Ensured invalid, empty, or missing log level values safely fall back to` info`.\n- Added unit tests for both flag and environment based log level configuration.\n- Updated backend and development documentation to describe log level configuration.\n\n## Steps to Test\n\n1. Go backend folder:\n ```bash\n cd backend\n2. Run the following command:\n ```bash\n npm run backend:start -- --log-level debug\n \nIf it doesnt work, build the backend again using `npm run backend:build` and follow the above steps.\n\n## Screenshots (if applicable)\n\n1. Debug log level\n\"SCR-20260202-tydl\"\"CLA

The committers listed above are authorized under a signed CLA.
  • :white_check_mark: login: aadhil2k4 / name: Aadhil Ahamed (eff979856d1e2032bbe852740252457d8089061a)
", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "cd backend\r\n2. Run the following command:" + ] + } + }, + "metadata": { + "tags": [ + "headlamp", + "sandbox", + "observability", + "lgtm", + "cncf-cla--yes", + "approved", + "size-l" + ], + "category": "observability", + "cncfProjects": [ + "headlamp" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/kubernetes-sigs/headlamp/pull/4458", + "sourceRepo": "kubernetes-sigs/headlamp", + "reactions": 2, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:48:38.383Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-10603-trigger-hook-delete-policy-after-log-retrieval-for-helm-test.json b/solutions/cncf-generated/helm/helm-10603-trigger-hook-delete-policy-after-log-retrieval-for-helm-test.json new file mode 100644 index 00000000..247dce57 --- /dev/null +++ b/solutions/cncf-generated/helm/helm-10603-trigger-hook-delete-policy-after-log-retrieval-for-helm-test.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:37.898Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "helm: Trigger hook-delete-policy after log retrieval for helm test", + "description": "**What this PR does / why we need it**:\n\nCloses #9098 **Helm test --logs fails with hook-delete-policy \"hook-failed\" or \"hook-succeed\"**\n\nIf we want to run `helm test --logs`, this results in an error like:\n````\n$ helm --namespace test validation --logs\nNAME: validation\nLAST DEPLOYED: Sat Jan 22 22:43:19 2022\nNAMESPACE: test\nSTATUS: deployed\nREVISION: 2\nTEST SUITE: test-secrets-store\nLast Started: Sat Jan 22 22:43:25 2022\nLast Completed: Sat Jan 22 22:43:27 2022\nPhase: Succeeded\n\nError: unable to get pod logs for 
test-secrets-store: pods \"test-secrets-store\" not found\n````\n\nWhen calling `helm test`, the following functions are called in the code:\n1. `helm test` results in a call of `newReleaseTestCmd()` in `helm\\release_testing.go`\n2. `newReleaseTestCmd()` results in a call of `Run(args[0])` in `action\\release_testing.go`\n3. `Run(args[0])` results in a call of `execHook(rel, release.HookTest, r.Timeout)` in `hooks.go`\n4. `execHook(rel, release.HookTest, r.Timeout)` res", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## WHAT\n\nAdding [Hook deletion policies](https://helm.sh/docs/topics/charts_hooks/#hook-deletion-policies) for the Helm tests.\n\n## WHY\n\nWhen running tests with Helm the pods will not be tidied up.\nWhen the annotation `\"helm.sh/hook-delete-policy\": hook-succeeded` is added, after a successful run the pod will be deleted afterwards.\nIf the pod fails it will still be present.\n\nAlso the value `before-hook-creation` was added to be able to run the test again if it failed without manually deleting the pod.\n\n
\n\nMarco Lecheler [marco.lecheler@mercedes-benz.com](mailto:marco.lecheler@mercedes-benz.com) Mercedes-Benz Tech Innovation GmbH ([ProviderInformation](https://github.com/mercedes-benz/foss/blob/master/PROVIDER_INFORMATION.md))", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "$ helm --namespace test validation --logs\r\nNAME: validation\r\nLAST DEPLOYED: Sat Jan 22 22:43:19 2022\r\nNAMESPACE: test\r\nSTATUS: deployed\r\nREVISION: 2\r\nTEST SUITE: test-secrets-store\r\nLast Started: Sat Jan 22 22:43:25 2022\r\nLast Completed: Sat Jan 22 22:43:27 2022\r\nPhase: Succeeded\r\n\r\nError: unable to get pod logs for test-secrets-store: pods \"test-secrets-store\" not found" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "size-m", + "needs-rebase", + "bug" + ], + "category": "workloads", + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [ + "Pod", + "Secret", + "Namespace" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/helm/helm/pull/10603", + "sourceRepo": "helm/helm", + "reactions": 25, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:47:37.898Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-11440-fix-null-key-removal-in-nested-subcharts.json b/solutions/cncf-generated/helm/helm-11440-fix-null-key-removal-in-nested-subcharts.json new file mode 100644 index 00000000..4481d2f5 --- /dev/null +++ b/solutions/cncf-generated/helm/helm-11440-fix-null-key-removal-in-nested-subcharts.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:21.150Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "helm: Fix null key removal in nested subcharts", + "description": "closes #9136\ncloses #9027\n\nThis PR contains two commits, the 
first replicates the behaviour described in #9136 and the second resolves it.\n\nThe original unit-tests did have a case for the subchart values and it was passing, so I spent some time trying to work out why there is a difference between the behaviour of the unit-tests and real world usage. It seems that the bug is related to the way the chart object is mutated by chartutil.ProcessDependencies. During that call the chart objects values are merged with its subcharts leading to a situation where all values are merged together into the parent chart's values.\n\nTo replicate this behaviour in the unit tests I just added the following to the parent charts definition:\n\n```\n\t\t\t\"pequod\": map[string]interface{}{\n\t\t\t\t\"ahab\": map[string]interface{}{\n\t\t\t\t\t\"scope\": \"ahab\",\n\t\t\t\t\t\"nested\": map[string]interface{}{\n\t\t\t\t\t\t\"foo\": true,\n\t\t\t\t\t\t\"bar\": true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n```\n\nSince `bar` is now defined in that parent chart object, when t", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "**What this PR does / why we need it**:\n\ncloses #9136 \n\n**Special notes for your reviewer**:\n\n**If applicable**:\n- [ ] this PR contains documentation\n- [x] this PR contains unit tests\n- [ ] this PR has been tested for backwards compatibility - **Need to verify import values are not broken**", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "\"pequod\": map[string]interface{}{\r\n\t\t\t\t\"ahab\": map[string]interface{}{\r\n\t\t\t\t\t\"scope\": \"ahab\",\r\n\t\t\t\t\t\"nested\": map[string]interface{}{\r\n\t\t\t\t\t\t\"foo\": true,\r\n\t\t\t\t\t\t\"bar\": true,\r\n\t\t\t\t\t},\r\n\t\t\t\t},\r\n\t\t\t},", + "resources:\r\n limits:\r\n cpu: 100m\r\n memory: 128Mi\r\n requests:\r\n cpu: 100m\r\n memory: 128Mi", + "subchart:\r\n resources:\r\n requests:\r\n cpu: 1000m\r\n memory: 2Gi\r\n limits:\r\n cpu: 
null\r\n memory: 2Gi" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "size-l", + "bug" + ], + "category": "workloads", + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/helm/helm/pull/11440", + "sourceRepo": "helm/helm", + "reactions": 50, + "comments": 47 + }, + "security": { + "scannedAt": "2026-02-27T17:47:21.150Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-11760-fail-strict-lints-when-unused-values-are-provided.json b/solutions/cncf-generated/helm/helm-11760-fail-strict-lints-when-unused-values-are-provided.json new file mode 100644 index 00000000..c8051022 --- /dev/null +++ b/solutions/cncf-generated/helm/helm-11760-fail-strict-lints-when-unused-values-are-provided.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:38.979Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "helm: Fail strict lints when unused values are provided", + "description": "**What this PR does / why we need it**:\n\ncloses #6422 (Which actually closed due to inactivity, but I'm implementing a fix here)\n\nWhen an unused value is passed into the chart, a strict lint will throw an error. 
For example, given the chart, a `helm lint --strict` will fail.\n\n```\n# templates/configmap.yaml\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: example-chart\n namespace: example-chart\n labels:\n team: infrastructure\n chart_name: {{ $.Chart.Name }}\n chart_version: {{ $.Chart.Version }}\n annotations:\n deleteme: \"if found\"\n```\n\n```\n# values.yaml\nsuper:\n unused: REQUIRED\n value:\n that: REQUIRED\n is_not: REQUIRED\n used:\n ever: REQUIRED\n```\n\nHere's an example output of what'll happen with the extra, unused values.\n\n```\n\n❯ /usr/local/bin/helm lint --strict example-chart\n==> Linting example-chart\n[INFO] Chart.yaml: icon is recommended\n[ERROR] templates/: there are unused fields in values files [.Values.super.unused .Values.super.used.ever .Values.super.value.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This is a great initiative! We're very interested in being able to take advantage of such a feature.\nWhat's the status of this work? Is it blocked? 
Abandoned?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "# templates/configmap.yaml\r\nkind: ConfigMap\r\napiVersion: v1\r\nmetadata:\r\n name: example-chart\r\n namespace: example-chart\r\n labels:\r\n team: infrastructure\r\n chart_name: {{ $.Chart.Name }}\r\n chart_version: {{ $.Chart.Version }}\r\n annotations:\r\n deleteme: \"if found\"", + "# values.yaml\r\nsuper:\r\n unused: REQUIRED\r\n value:\r\n that: REQUIRED\r\n is_not: REQUIRED\r\n used:\r\n ever: REQUIRED", + "❯ /usr/local/bin/helm lint --strict example-chart\r\n==> Linting example-chart\r\n[INFO] Chart.yaml: icon is recommended\r\n[ERROR] templates/: there are unused fields in values files [.Values.super.unused .Values.super.used.ever .Values.super.value.is_not .Values.super.value.that]\r\n\r\nError: 1 chart(s) linted, 1 chart(s) failed" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "size-l" + ], + "category": "workloads", + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [ + "Deployment", + "Configmap", + "Namespace" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/helm/helm/pull/11760", + "sourceRepo": "helm/helm", + "reactions": 24, + "comments": 29 + }, + "security": { + "scannedAt": "2026-02-27T17:47:38.979Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-12541-add-support-for-weighted-manifests.json b/solutions/cncf-generated/helm/helm-12541-add-support-for-weighted-manifests.json new file mode 100644 index 00000000..bf16ff8b --- /dev/null +++ b/solutions/cncf-generated/helm/helm-12541-add-support-for-weighted-manifests.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:26.686Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "helm: add support for 
weighted manifests", + "description": "**What this PR does / why we need it**:\nAllows overriding the default manifest sort order by annotation.\nCloses https://github.com/helm/helm/issues/8439\n\n* The annotation: `helm.sh/order-weight`, if defined in a given manifest will influence the sorted order.\n* Lower weighted manifests will be applied prior to higher weighted manifests.\n* A value of 0 is assumed if the annotation is missing or not parseable.\n* Manifests of the same order weight will leverage the existing sorting logic.\n\n**Special notes for your reviewer**:\n\nPrior work:\n- https://github.com/helm/helm/pull/9534\n- https://github.com/helm/helm/pull/8448\n\n**If applicable**:\n- [ ] this PR contains documentation\n- [x] this PR contains unit tests\n- [x] this PR has been tested for backwards compatibility", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "See \"H4HIP: Helm Sequencing Proposal\" https://github.com/helm/community/pull/373", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "feature", + "size-l", + "stale" + ], + "category": "workloads", + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/helm/helm/pull/12541", + "sourceRepo": "helm/helm", + "reactions": 32, + "comments": 16 + }, + "security": { + "scannedAt": "2026-02-27T17:47:26.686Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-12743-feat-helm-add-skip-schema-validation-flag-to-helm-install-uprade-and-.json b/solutions/cncf-generated/helm/helm-12743-feat-helm-add-skip-schema-validation-flag-to-helm-install-uprade-and-.json new file mode 100644 index 00000000..40458cca --- /dev/null +++ 
b/solutions/cncf-generated/helm/helm-12743-feat-helm-add-skip-schema-validation-flag-to-helm-install-uprade-and-.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:36.304Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "helm: feat(helm): add --skip-schema-validation flag to helm 'install', 'uprade' and 'lint'", + "description": "**What this PR does / why we need it**:\n\nWhen --skip-schema-validation is set, any schema contained in the helm chart is ignored. Defaults to 'false'.\n\nCloses #10398\n\n**Special notes for your reviewer**:\n\nI tried to keep the code in the `pkg` folder backwards compatible.\n\nSupersedes https://github.com/helm/helm/pull/11510\n\nDocumentation added in https://github.com/helm/helm-www/pull/1549\n\n**If applicable**:\n- [x] this PR contains documentation\n- [x] this PR contains unit tests\n- [x] this PR has been tested for backwards compatibility\n\n----\n### maintainer addition\n- closes #11510", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "**What this PR does / why we need it**:\nAdds a \"--skip-schema-validation\" flag option for helm install, upgrade, lint, and template that skips validation of the values.schema.json file.\n\ncloses https://github.com/helm/helm/issues/10398\n\n**Special notes for your reviewer**:\nThis is my first time contributing, so please let me know if there is something I am doing wrong : )\n\n**If applicable**:\n- [ ] this PR contains documentation\n- [ ] this PR contains unit tests\n- [ ] this PR has been tested for backwards compatibility\n\nSigned-off-by: Jeff van Dam ", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "size-l" + ], + "category": "workloads", + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [], + "difficulty": 
"intermediate", + "sourceIssue": "https://github.com/helm/helm/pull/12743", + "sourceRepo": "helm/helm", + "reactions": 25, + "comments": 17 + }, + "security": { + "scannedAt": "2026-02-27T17:47:36.304Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-12879-bugfix-override-subcharts-with-null-values.json b/solutions/cncf-generated/helm/helm-12879-bugfix-override-subcharts-with-null-values.json new file mode 100644 index 00000000..2588c039 --- /dev/null +++ b/solutions/cncf-generated/helm/helm-12879-bugfix-override-subcharts-with-null-values.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:14.703Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "helm: bugfix: Override subcharts with null values", + "description": "This PR closes #12469 and closes #12488\n\nHelm should allow users to not only override default values, but also completely remove any default values by setting a config to `null`. \n\nThis works fine for regular charts, but default values within sub-charts cannot be `null`-ed. The linked issue has a good example of this created by user \"naemono.\" \n\nThe reason this issue is happening is because the `coalesce` function goes over sub-chart values that are defined in a values file or with a `--set` flag twice due to the logic [here](https://github.com/helm/helm/blob/d37e2e9097f9715d4d184b2e3cc313588f106030/pkg/chartutil/coalesce.go#L100-L101). 
`merge` is always set to `false` in this context, and the first time `coalesce` gets called, the null value gets removed during the `coalesceTablesFullKey` function [here.](https://github.com/helm/helm/blob/d37e2e9097f9715d4d184b2e3cc313588f106030/pkg/chartutil/coalesce.go#L279) This is fine for regular chart values, but for sub-chart values, the `co", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Hello !\n\nCPU limits shouldn't be set by default on subcharts and users should be able to configure the pods without them.\n\nHelm isn't able to nullify subcharts by-default values: https://github.com/helm/helm/pull/12879\n\nThis modification simply passes the default configuration of CPU and memory limits from the sub-charts to the main chart. I've kept the basic values as a recommendation, but the values are now nullifiable in a custom values.yaml:\n\n```yaml\nworker:\n app:\n resources:\n limits:\n cpu: null\n memory: 1500Mi\n requests:\n cpu: 50m\n memory: 100Mi\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "worker:\r\n app:\r\n resources:\r\n limits:\r\n cpu: null\r\n memory: 1500Mi\r\n requests:\r\n cpu: 50m\r\n memory: 100Mi" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "size-m", + "bug" + ], + "category": "workloads", + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/helm/helm/pull/12879", + "sourceRepo": "helm/helm", + "reactions": 119, + "comments": 65 + }, + "security": { + "scannedAt": "2026-02-27T17:47:14.703Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-4468-add-an-option-which-builds-recursively-the-dependent-charts.json 
b/solutions/cncf-generated/helm/helm-4468-add-an-option-which-builds-recursively-the-dependent-charts.json new file mode 100644 index 00000000..46d1891e --- /dev/null +++ b/solutions/cncf-generated/helm/helm-4468-add-an-option-which-builds-recursively-the-dependent-charts.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:41.992Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "helm: Add an option which builds recursively the dependent charts", + "description": "This is based on PR #2278. \n\nThe recursive build is now implemented iteratively and also some tests are added.\n\nIt was also tested manually on a more complex chart https://github.com/ccojocar/environment-scalewalnut-staging/tree/master/env.\n\nfixes #2247\n\ncc @jstrachan", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "**What this PR does / why we need it**:\nAdds a [kong ingress controller](https://github.com/Kong/kubernetes-ingress-controller) deployment to the chart. This enables the management of kong using custom resources.\n\n**Which issue this PR fixes**:\n fixes Kong/kubernetes-ingress-controller/issues/36\n\n**Special notes for your reviewer**:\nIt's a first approach, it's working for us, please, I'm open to suggestions and reviews.\n\nIt's limited to Kong CE 0.13.X, but support for 0.14.0 was merged a couple of days. @shashiranjan84, can you release a new version of the docker image ? \n\n@hbagdi, I wasn't able to make it work when the kong-admin port is secured, is this the expeceted behaviour ? 
It makes sense to me, but I wanted to check first:\n\n```\ningress-controller:\n main.go:144] Get https://localhost:8444/: x509: certificate signed by unknown authority\n```\n\nThanks !", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "ingress-controller:\r\n main.go:144] Get https://localhost:8444/: x509: certificate signed by unknown authority", + "dependencies:\r\n - name: redis\r\n version: \"3.6.4\"\r\n repository: \"@stable\"\r\n - name: alpine\r\n version: \"0.1.0\"\r\n repository: \"file://../alpine\"", + "Hang tight while we grab the latest from your chart repositories...\r\n...Unable to get an update from the \"local\" chart repository (http://127.0.0.1:8879/charts):\r\n\tGet http://127.0.0.1:8879/charts/index.yaml: dial tcp 127.0.0.1:8879: connect: connection refused\r\n...Successfully got an update from the \"incubator\" chart repository\r\n...Successfully got an update from the \"stable\" chart repository\r\nUpdate Complete. 
⎈Happy Helming!⎈\r\nSaving 2 charts\r\nDownloading redis from repo https://kubernetes-charts.storage.googleapis.com\r\nUnpacking: charts/redis-3.6.4.tgz\r\nDeleting outdated charts\r\nNo requirements found in charts/redis/charts.\r\nError: could not find charts/alpine: stat charts/alpine: no such file or directory" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "feature", + "in-progress", + "size-l" + ], + "category": "workloads", + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/helm/helm/pull/4468", + "sourceRepo": "helm/helm", + "reactions": 21, + "comments": 32 + }, + "security": { + "scannedAt": "2026-02-27T17:47:41.992Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-5112-fix-helm-wait-for-crds-to-reach-established-state-for-crd-install-hook.json b/solutions/cncf-generated/helm/helm-5112-fix-helm-wait-for-crds-to-reach-established-state-for-crd-install-hook.json new file mode 100644 index 00000000..68352c64 --- /dev/null +++ b/solutions/cncf-generated/helm/helm-5112-fix-helm-wait-for-crds-to-reach-established-state-for-crd-install-hook.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:33.221Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "helm: fix(helm): Wait for CRDs to reach established state for crd_install hook", + "description": "**What this PR does / why we need it**:\nThere is a race condition in the crd_install hook implementation, where there is a chance a CRD is not yet ready by the time CRs are being created. This is reported in issue #4925. 
This change makes sure CRDs installed through the crd_install hook reaches the `established` state before the hook is considered complete.\n\nFixes #4925\n\n**Special notes for your reviewer**:\nUnit-testing code in the kubernetes client is difficult, as the builder/infos is tightly coupled with the API server. I will look into how to improve testing for this part of the codebase, but I would like to separate it from this PR.\n\n**If applicable**:\n- [x] this PR contains documentation\n- [x] this PR contains unit tests\n- [x] this PR has been tested for backwards compatibility", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "#### What this PR does / why we need it:\n\nThe prometheus-operator helm chart has a flag which decides whether or not to create the CRDs: `prometheusOperator.createCustomResource`.\n\nHowever, even when a user sets this flag to false, the prometheus operator itself will go ahead and create/update the existing CRDs during controller startup via this logic:\n\nhttps://github.com/coreos/prometheus-operator/blob/1f2cf36582ced42e0c3bcb45a6f269d563d70eb3/pkg/prometheus/operator.go#L1445\n\nThe above logic creates or updates the existing prometheus CRDs which is not expected when setting prometheusOperator.createCustomResource=false.\n\nThis change adds the `--manage-crds=false` to the command line options to prometheus-operator so that when a user sets `prometheusOperator.createCustomResource=false`, no CRDs will be created (either by helm or by the operator). The decision to create CRDs or not is now fully controlled by the chart and not the operator. \n\n#### Special notes for your reviewer:\n\n#### Checklist\n[Place an '[x]' (no spaces) in all applicable fields. 
Please remove unrelated fields.]\n- [x] [DCO](https://github.com/helm/charts/blob/master/CONTRIBUTING.md#sign-your-work) signed\n- [x] Chart Version bumped\n- [x] Variables are documented in the README.md", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "size-l", + "v3-port-complete" + ], + "category": "workloads", + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [], + "difficulty": "expert", + "sourceIssue": "https://github.com/helm/helm/pull/5112", + "sourceRepo": "helm/helm", + "reactions": 27, + "comments": 21 + }, + "security": { + "scannedAt": "2026-02-27T17:47:33.221Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-5371-storage-add-an-sql-storage-driver.json b/solutions/cncf-generated/helm/helm-5371-storage-add-an-sql-storage-driver.json new file mode 100644 index 00000000..f969fb0c --- /dev/null +++ b/solutions/cncf-generated/helm/helm-5371-storage-add-an-sql-storage-driver.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:23.933Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "helm: [storage] Add an SQL storage driver", + "description": "**What this PR does / why we need it**:\n\nThis commits adds the possibility to back Tiller (or the future\nTiller-less Helm CLI) with any SQL database (only postgres has been\ntested so far) to store release information.\n\nThe main motivation for this commit was to use a storage backend that\nwould allow releases larger that 1MB in size (ConfigMap or Secret\ndrivers don't, because of limits on value size in the underlying etcd\nkey-value store).\n\nSigned-off-by: Étienne Lafarge \n\nCo-authored-by: Elliot Maincourt (@emaincourt)\nCo-authored-by: Paul Borensztein 
(@commit-master)\n\n**Special notes for the reviewer(s)**:\n\n* It goes without saying that we'll be glad to maintain and update this part of the codebase - in particular - to comply with the new Tiller-less Helm 3 ;-) \n* Also, we'll be glad to write an `any backend -> postgres` migration script if this backend gets promoted to GA at some point.\n* Only `postgresql` has been tested", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "He's currently on vacation for the next week. I'd try asking in #helm-dev on Slack and ask if another maintainer would be willing to review this.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "size-xl" + ], + "category": "workloads", + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [ + "Configmap", + "Secret" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/helm/helm/pull/5371", + "sourceRepo": "helm/helm", + "reactions": 39, + "comments": 24 + }, + "security": { + "scannedAt": "2026-02-27T17:47:23.933Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-5492-feat-helm-add-app-version-flag-to-helm-install-upgrade.json b/solutions/cncf-generated/helm/helm-5492-feat-helm-add-app-version-flag-to-helm-install-upgrade.json new file mode 100644 index 00000000..00f8ede3 --- /dev/null +++ b/solutions/cncf-generated/helm/helm-5492-feat-helm-add-app-version-flag-to-helm-install-upgrade.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:23.009Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "helm: feat(helm): add --app-version flag to 'helm install/upgrade'", + "description": "Closes #3555\n\nNew pull request for https://github.com/helm/helm/pull/4961, sorry 
about that\n\n**If applicable**:\n- [X] this PR contains documentation\n- [x] this PR contains unit tests", + "type": "upgrade", + "status": "completed", + "resolution": { + "summary": "When 'helm install/upgrade --app-version 1.0.0' is run, this will override the chart app version\n\nCloses #3555\n\nSigned-off-by: Kevin Labesse \n\nWaiting for your feedback, not 100% sure is the most efficient way do to that", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "#!/bin/bash -eo pipefail\r\n.circleci/test.sh\r\nRunning 'make build'\r\nGOBIN=/go/src/k8s.io/helm/bin go install -tags '' -ldflags '-w -s -X k8s.io/helm/pkg/version.GitCommit=b5f34b33bb0a61800ed83066f9654c5b5fa29ed0 -X k8s.io/helm/pkg/version.GitTreeState=clean' k8s.io/helm/cmd/...\r\nRunning 'make verify-docs'\r\nGOBIN=/go/src/k8s.io/helm/bin go install -tags '' -ldflags '-w -s -X k8s.io/helm/pkg/version.GitCommit=b5f34b33bb0a61800ed83066f9654c5b5fa29ed0 -X k8s.io/helm/pkg/version.GitTreeState=clean' k8s.io/helm/cmd/...\r\nCreating /root/.helm \r\nCreating /root/.helm/repository \r\nCreating /root/.helm/repository/cache \r\nCreating /root/.helm/repository/local \r\nCreating /root/.helm/plugins \r\nCreating /root/.helm/starters \r\nCreating /root/.helm/cache/archive \r\nCreating /root/.helm/repository/repositories.yaml \r\nAdding stable repo with URL: https://kubernetes-charts.storage.googleapis.com \r\nAdding local repo with URL: http://127.0.0.1:8879/charts \r\n$HELM_HOME has been configured at /root/.helm.\r\nNot installing Tiller due to 'client-only' flag having been set\r\n--- /tmp/tmp.xDUewHmInp/docs/helm/helm_install.md\t2019-06-07 13:13:04.965128551 +0000\r\n+++ docs/helm/helm_install.md\t2019-06-07 13:10:32.990535551 +0000\r\n@@ -83,7 +83,7 @@ helm install [CHART] [flags]\r\n --ca-file string Verify certificates of HTTPS-enabled servers using this CA bundle\r\n --cert-file string Identify HTTPS client using this SSL 
certificate file\r\n --dep-up Run helm dependency update before installing the chart\r\n- --description string Specify a description for the release\r\n+ --description string specify a description for the release\r\n --devel Use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored.\r\n --dry-run Simulate an install\r\n -h, --help help for install\r\nhelm docs are out of date. Please run \"make docs\"\r\nMakefile:150: recipe for target 'verify-docs' failed\r\nmake: *** [verify-docs] Error 1\r\nExited with code 2", + "{{ default .Values.appVersion .Chart.AppVersion }}", + "{{ default .Values.appVersion .Chart.AppVersion }}" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "in-progress", + "size-l" + ], + "category": "workloads", + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/helm/helm/pull/5492", + "sourceRepo": "helm/helm", + "reactions": 43, + "comments": 25 + }, + "security": { + "scannedAt": "2026-02-27T17:47:23.009Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-7417-update-values-yaml-linting-to-respect-value-options-flags.json b/solutions/cncf-generated/helm/helm-7417-update-values-yaml-linting-to-respect-value-options-flags.json new file mode 100644 index 00000000..668905ef --- /dev/null +++ b/solutions/cncf-generated/helm/helm-7417-update-values-yaml-linting-to-respect-value-options-flags.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:31.561Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "helm: Update values.yaml linting to respect value options flags", + "description": "Previously, the lint command ignored values passed via value options\nflags when linting a chart's `values.yaml` file. 
This was problematic\nbecause such values are often required when validating against JSON\nschema\n\nCloses #7273\n\nSigned-off-by: Robbie deMuth ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "When support for `helm lint --values` was added, this support did not extend to the values file tests. This PR extends the support to values files, which lets the user schema-test the combination of the chart and the passed-in values.\n\nAlso added tests for other values lints.\n\nCloses #7756 \n\nSigned-off-by: Matt Butcher ", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "awaiting-review", + "size-m", + "v3-x" + ], + "category": "workloads", + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/helm/helm/pull/7417", + "sourceRepo": "helm/helm", + "reactions": 27, + "comments": 9 + }, + "security": { + "scannedAt": "2026-02-27T17:47:31.561Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-7431-feat-helm-add-recreate-upgrade-rollback-strategy.json b/solutions/cncf-generated/helm/helm-7431-feat-helm-add-recreate-upgrade-rollback-strategy.json new file mode 100644 index 00000000..76c3870d --- /dev/null +++ b/solutions/cncf-generated/helm/helm-7431-feat-helm-add-recreate-upgrade-rollback-strategy.json @@ -0,0 +1,55 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:19.357Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "helm: feat(helm): add recreate upgrade (rollback) strategy", + "description": "An additional optional flag `--recreate` can be passed on upgrade (or rollback) of a release.\nIn combination with the `--force` flag the following strategies are 
employed when updating a resource (which can be directly compared to kubectl):\n\n```\nhelm kubectl action on 'invalid' or 'conflict'\n--------------------------------------------------------------------------------------------------------------\n\nupgrade apply PATCH fail\n\nupgrade --force replace PUT fail\n\nupgrade --recreate apply --force PATCH DELETE -> GET (poll) -> POST\n\nupgrade --recreate --force replace --force DELETE -> GET (poll) -> POST fail\n```\n\nThe 'on error' column should be interpreted as follows. The server responds with 'invalid' e.g. if a certain resource contains an immutable field, which cannot be patched or updated b", + "type": "upgrade", + "status": "completed", + "resolution": { + "summary": "This PR changes the behavior of \"helm upgrade\" command when there have been no successful and at least one failed release. This allows the user to recover from a failed install (or a partially failed install) without uninstalling the release. The user can now decide to decide to ignore the \"no deployed releases\" errors and allow it to upgrade a failed release.\n\nThere were no test cases that tested the failure scenarios of upgrading a failed/missing release so I added them and also added additional test cases that test the new functionality.\n\nFixes issue #5595\n\nSigned-off-by: Matt Morrissete ", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "helm kubectl action on 'invalid' or 'conflict'\r\n--------------------------------------------------------------------------------------------------------------\r\n\r\nupgrade apply PATCH fail\r\n\r\nupgrade --force replace PUT fail\r\n\r\nupgrade --recreate apply --force PATCH DELETE -> GET (poll) -> POST\r\n\r\nupgrade --recreate --force replace --force DELETE -> GET (poll) -> POST fail", + "Update(original, target ResourceList, force bool) (*Result, error)", + "Update(original, target ResourceList, options 
UpdateOptions) (*Result, error)" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "feature", + "size-xl", + "v3-x" + ], + "category": "workloads", + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [ + "Service", + "Storageclass", + "Role", + "Rolebinding" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/helm/helm/pull/7431", + "sourceRepo": "helm/helm", + "reactions": 52, + "comments": 82 + }, + "security": { + "scannedAt": "2026-02-27T17:47:19.357Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-7649-adopt-resources-into-release-with-correct-instance-and-managed-by-labe.json b/solutions/cncf-generated/helm/helm-7649-adopt-resources-into-release-with-correct-instance-and-managed-by-labe.json new file mode 100644 index 00000000..022a762b --- /dev/null +++ b/solutions/cncf-generated/helm/helm-7649-adopt-resources-into-release-with-correct-instance-and-managed-by-labe.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:12.549Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "helm: Adopt resources into release with correct instance and managed-by labels", + "description": "Alternative implementation to https://github.com/helm/helm/pull/7627, https://github.com/helm/helm/pull/7625, and https://github.com/helm/helm/pull/7575.\n\nHelp with validation would be appreciated, but in theory this closes https://github.com/helm/helm/issues/6850, closes https://github.com/helm/helm/issues/4824, closes https://github.com/helm/helm/issues/2947, and closes https://github.com/helm/helm/issues/7418. 
This implementation would also help make https://github.com/helm/helm/issues/2730 very approachable.\n\n## How it Works\n\n### ~Option A: Standard Labels~ https://github.com/helm/helm/pull/7649/commits/a29365b3c663f1c182ea78461825ae28b8573915\n\nHelm will assume that it has the right to \"adopt\" any existing Kubernetes resource containing the following labels:\n\n* `app.kubernetes.io/managed-by: Helm`\n* `app.kubernetes.io/instance: `\n\nThe major downside is that in order to get this functionality, setting these labels is required. Despite this having been a [documented bes", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Related to https://github.com/helm/helm/issues/6850, and an alternative implementation to https://github.com/helm/helm/pull/7575.\n\n### Steps to Reproduce\n\nCreate a chart with\n\n```yaml\napiVersion: apps/v1beta2\nkind: Deployment\n```\n\nDeploy a release of that chart, then switch to apps/v1:\n\n```diff\n- apiVersion: apps/v1beta2\n+ apiVersion: apps/v1\nkind: Deployment\n```\n\nRun `upgrade`, and you should get the following error before this patch:\n\n```\nrendered manifests contain a new resource that already exists. 
Unable to continue with update: existing resource conflict: namespace: default, name: version-migrate-test, existing_kind: apps/v1, Kind=Deployment, new_kind: apps/v1, Kind=Deployment\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "KIND=deployment\r\nNAME=my-app-staging\r\nRELEASE=staging\r\nNAMESPACE=default\r\nkubectl annotate $KIND $NAME meta.helm.sh/release-name=$RELEASE\r\nkubectl annotate $KIND $NAME meta.helm.sh/release-namespace=$NAMESPACE\r\nkubectl label $KIND $NAME app.kubernetes.io/managed-by=Helm", + "apiVersion: apps/v1beta2\r\nkind: Deployment", + "Run `upgrade`, and you should get the following error before this patch:" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "size-l" + ], + "category": "workloads", + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [ + "Deployment", + "Namespace" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/helm/helm/pull/7649", + "sourceRepo": "helm/helm", + "reactions": 127, + "comments": 56 + }, + "security": { + "scannedAt": "2026-02-27T17:47:12.549Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-7792-add-hook-parallelism-flag.json b/solutions/cncf-generated/helm/helm-7792-add-hook-parallelism-flag.json new file mode 100644 index 00000000..5477e2bd --- /dev/null +++ b/solutions/cncf-generated/helm/helm-7792-add-hook-parallelism-flag.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:34.521Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "helm: Add --hook-parallelism flag", + "description": "Enables parallel execution of chart hooks. `--hook-parallelism` defaults to 1 to make this an opt-in feature.\n\nBatching is used to comply with `\"helm.sh/hook-weight\"` annotations. 
A semaphore is used to limit parallelism to the user-provided value. Some additional synchronization was necessary to run `cfg.recordRelease()` in a thread-safe manner.\n\nWIP: testing\n\nCloses #7763.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Enables parallel execution of chart hooks. --hook-parallelism defaults to 1 to make this an opt-in feature.\n \nBatching is used to comply with \"helm.sh/hook-weight\" annotations. A semaphore is used to limit parallelism\nto the user-provided value. Some additional synchronization was necessary to run cfg.recordRelease() in a\nthread-safe manner.\n \nBased off of https://github.com/helm/helm/pull/7792 by akhilles.\n \nCloses #7763.\n \nSigned-off-by: Andrew Baehre \n\n**What this PR does / why we need it**:\nAdds hook parallelism with a default of 1 as a flag for applicable commands.\n\n**Special notes for your reviewer**:\nThis was based on https://github.com/helm/helm/pull/7792. \nTests were added, but if you'd like more robust tests please let me know.\n\n**If applicable**:\n- [ ] this PR contains documentation\n- [x] this PR contains unit tests\n- [ ] this PR has been tested for backwards compatibility", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "size-l", + "wip" + ], + "category": "workloads", + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/helm/helm/pull/7792", + "sourceRepo": "helm/helm", + "reactions": 27, + "comments": 6 + }, + "security": { + "scannedAt": "2026-02-27T17:47:34.521Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-8841-add-include-file-flag-for-external-files.json 
b/solutions/cncf-generated/helm/helm-8841-add-include-file-flag-for-external-files.json new file mode 100644 index 00000000..b2af1c92 --- /dev/null +++ b/solutions/cncf-generated/helm/helm-8841-add-include-file-flag-for-external-files.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:16.316Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "helm: Add include-file flag for external files ", + "description": "This PR implements the `--include-file` and `--include-dir` flags for the install, upgrade and template commands.\n\nThese flags load local files and make them available to the chart, so they can be used in templates with functions like `.Files.Get` or `.Files.Glob`.\n\nThis is my first PR here, and I would appreciate comments regarding style, code split (what goes where), and the amount and quality of tests that we need.\n\nThank you!\n\n**Parsing**\n\nBoth flags are arrays and can have multiple values, either comma-separated, or by writing the flag multiple times.\n\nBoth flags require a key and a path. They will get parsed just like `--set*`, and later values will overwrite previous ones. The paths are then added as if they are part of the chart; the keys represent the file names inside the chart.\n\nYou can use both flags in the same command. Files are parsed first, then directories.\n\n**Single files**\n\n```\nhelm template test . --include-file my_license.conf=../license.conf,foo=foo.txt --include-", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This PR implements the `--include-file` and `--include-dir` flags for the install, upgrade and template commands. 
\n\nThese flags load local files and make them available to the chart, so they can be used in templates with functions like `.Files.Get` or `.Files.Glob`.\n\nThis is my first PR here, and I would appreciate comments regarding style, code split (what goes where), and the amount and quality of tests that we need. \n\nThank you!\n\n**Parsing**\n\nBoth flags are arrays and can have multiple values, either comma-separated, or by writing the flag multiple times. \n\nBoth flags require a key and a path. They will get parsed just like `--set*`, and later values will overwrite previous ones. The paths are then added as if they are part of the chart; the keys represent the file names inside the chart.\n\nYou can use both flags in the same command. Files are parsed first, then directories.\n\n**Single files**\n\n```\nhelm template test . --include-file my_license.conf=../license.conf,foo=foo.txt --include-file bar=bar.txt --set license=my_license.conf\n\n# in chart/templates/cmap.yml\n{{ (.Files.Glob .Values.license).AsConfig | indent 2 }}\n```\n\n**Dirs**\n\nThe include-dir flag can include all the files from a local directory. It will not recurse in subdirectories.\n\n```\nhelm template test . --include-dir certs=../certs\n\n# in chart/templates/cmap.yml\n{{ (.Files.Glob \"certs/\").AsConfig | indent 2 }}\n```\n\n**Globs**\n\nThe `--include-dir` flag optionally supports a glob. It can recurse, but it will take e", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "helm template test . --include-file my_license.conf=../license.conf,foo=foo.txt --include-file bar=bar.txt --set license=my_license.conf\r\n\r\n# in chart/templates/cmap.yml\r\n{{ (.Files.Glob .Values.license).AsConfig | indent 2 }}", + "helm template test . --include-dir certs=../certs\r\n\r\n# in chart/templates/cmap.yml\r\n{{ (.Files.Glob \"certs/\").AsConfig | indent 2 }}", + "helm template test . 
--include-dir conf=../prod/conf/*.conf\r\n\r\n# in chart/templates/cmap.yml\r\n{{ (.Files.Glob \"conf/\").AsConfig | indent 2 }}" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "size-xl" + ], + "category": "workloads", + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/helm/helm/pull/8841", + "sourceRepo": "helm/helm", + "reactions": 106, + "comments": 51 + }, + "security": { + "scannedAt": "2026-02-27T17:47:16.316Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-9426-feat-helm-add-ability-for-dry-run-to-do-lookup-functions.json b/solutions/cncf-generated/helm/helm-9426-feat-helm-add-ability-for-dry-run-to-do-lookup-functions.json new file mode 100644 index 00000000..a8cd1346 --- /dev/null +++ b/solutions/cncf-generated/helm/helm-9426-feat-helm-add-ability-for-dry-run-to-do-lookup-functions.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:17.576Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "helm: feat(helm): add ability for --dry-run to do lookup functions", + "description": "When a helm command is run with the --dry-run flag, it will try to connect to the cluster\nto be able to render lookup functions.\nCloses #8137\n\nSigned-off-by: Tapas Kapadia \n\n**What this PR does / why we need it**: It is hard to debug the lookup function and currently there is not a good way to test it with any flags. #8137 Stated that the `--dry-run` was fair game to try to implement this logic as long as the `helm template` logic stays the same.\n\n**Special notes for your reviewer**: This is my first PR for the Helm; please let me know if I need to add or change anything. 
\n\n**If applicable**:\n- [ ] this PR contains documentation\n- [ ] this PR contains unit tests\n- [ ] this PR has been tested for backwards compatibility", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "I second @joejulian said about `--dry-run='none'|'client'|'server'`. The PR here (as it stands) is a change in behavior. \n\nCurrently `helm install --dry-run=true` does not contact the cluster, and to the point of the security advisory/model ([GHSA-q8q8-93cv-v6h8](https://github.com/helm/helm/security/advisories/GHSA-q8q8-93cv-v6h8)), I think would be a breaking change for some users (ie. they could suddenly get secrets printed and logged in a CI system; see e.g. https://github.com/helm/helm/issues/7275)\n\nI put a proposal on the main issue (rather than this implementation PR [here](https://github.com/helm/helm/issues/8137#issuecomment-1365384675)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "--dry-run='none':\r\n\tMust be \"none\", \"server\", or \"client\". If client strategy, only print the object that\r\n\twould be sent, without sending it. If server strategy, submit server-side request without\r\n\tpersisting the resource.", + "W0919 10:06:16.181516 1786127 helpers.go:639] --dry-run is deprecated and can be replaced with --dry-run=client." 
+ ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "size-m" + ], + "category": "workloads", + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/helm/helm/pull/9426", + "sourceRepo": "helm/helm", + "reactions": 95, + "comments": 51 + }, + "security": { + "scannedAt": "2026-02-27T17:47:17.576Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-9534-allow-configuring-install-order-of-custom-resources.json b/solutions/cncf-generated/helm/helm-9534-allow-configuring-install-order-of-custom-resources.json new file mode 100644 index 00000000..4d0b4aae --- /dev/null +++ b/solutions/cncf-generated/helm/helm-9534-allow-configuring-install-order-of-custom-resources.json @@ -0,0 +1,54 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:30.288Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "helm: Allow configuring install order of Custom Resources", + "description": "**What this PR does / why we need it**:\n\nBy default Helm installs Kubernetes Kinds that doesn't exist in the internal [kube_sorter.go](https://github.com/helm/helm/blob/d55c53df4e394fb62b0514a09c57bce235dd7877/pkg/releaseutil/kind_sorter.go#L31-L66) at the very end of a helm installation. This causes some serious issues with Custom Resources that need to be deployed before specific other resources.\n\nThis PR introduces a new Annotation called `helm.sh/install-before`. With this annotation user can specify in which order this Custom Resource is actually installed. 
For example `helm.sh/install-before=\"Deployment,Service\"` means that this Resource will be installed **before** the Deployment resource as well as **before** the Service resource.\n\nHere is a example resource with this Annotation:\n\n```yaml\n# Source: generic-service/templates/securitygrouppolicy.yaml\napiVersion: vpcresources.k8s.aws/v1beta1\nkind: SecurityGroupPolicy\nmetadata:\n name: niklas-debug\n labels:\n helm.sh/chart: gene", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "This HIP will cover the option to customize the install order of custom resources. \n\nhttps://github.com/helm/helm/pull/9534", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "# Source: generic-service/templates/securitygrouppolicy.yaml\r\napiVersion: vpcresources.k8s.aws/v1beta1\r\nkind: SecurityGroupPolicy\r\nmetadata:\r\n name: niklas-debug\r\n labels:\r\n helm.sh/chart: generic-service-0.5.0\r\n app.kubernetes.io/name: niklas-debug\r\n app.kubernetes.io/instance: niklas-debug\r\n app.kubernetes.io/version: \"0.1.0\"\r\n app.kubernetes.io/managed-by: Helm\r\n annotations:\r\n helm.sh/install-before: \"Deployment,Statefulset,DaemonSet\"\r\nspec:\r\n podSelector: \r\n matchLabels:\r\n app.kubernetes.io/name: niklas-debug\r\n app.kubernetes.io/instance: niklas-debug\r\n securityGroups:\r\n groupIds:\r\n - sg-xyz" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "size-l", + "needs-rebase", + "stale" + ], + "category": "workloads", + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [ + "Pod", + "Deployment", + "Service", + "Statefulset", + "Daemonset" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/helm/helm/pull/9534", + "sourceRepo": "helm/helm", + "reactions": 28, + "comments": 7 + }, + "security": { + "scannedAt": "2026-02-27T17:47:30.288Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + 
"findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-9653-feat-helm-add-reset-then-reuse-values-flag-to-helm-upgrade.json b/solutions/cncf-generated/helm/helm-9653-feat-helm-add-reset-then-reuse-values-flag-to-helm-upgrade.json new file mode 100644 index 00000000..1519eec7 --- /dev/null +++ b/solutions/cncf-generated/helm/helm-9653-feat-helm-add-reset-then-reuse-values-flag-to-helm-upgrade.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:25.786Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "helm: feat(helm): Add --reset-then-reuse-values flag to 'helm upgrade'", + "description": "**What this PR does / why we need it**:\nWhen `--reset-then-reuse-values` is used on 'helm upgrade', the chart's values will be reset to the values of the deployed chart while the current release's values will be reused and merged with the values passed as argument (is any). `--reset-values` and `--reuse-values` flags take precedence over `--reset-then-reuse-values`, making it ignored if one or the other is also used.\n\nThis is a slight improvement over the many times suggested workaround of `helm get values > v.yaml; helm upgrade --reset-values --values v.yaml` in the sense that this flag allows for atomic operation.\n\nCloses #8085\nCloses #3957\n\n**If applicable**:\n- [x] this PR contains documentation\n- [x] this PR contains unit tests\n- [x] this PR has been tested for backwards compatibility", + "type": "upgrade", + "status": "completed", + "resolution": { + "summary": "Adds [iterative/dql-server → ray/kubernetes/README.md](https://github.com/iterative/dql-server/blob/main/ray/kubernetes/README.md) to the chart.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "size-m", + "has-one-approval" + ], + "category": "workloads", 
+ "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/helm/helm/pull/9653", + "sourceRepo": "helm/helm", + "reactions": 37, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:47:25.786Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-9782-implement-changes-proposed-in-hip-6-oci-support.json b/solutions/cncf-generated/helm/helm-9782-implement-changes-proposed-in-hip-6-oci-support.json new file mode 100644 index 00000000..9f13c0af --- /dev/null +++ b/solutions/cncf-generated/helm/helm-9782-implement-changes-proposed-in-hip-6-oci-support.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:40.432Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "helm: Implement changes proposed in HIP 6: OCI Support", + "description": "For more information, please see the following URL: https://github.com/helm/community/blob/main/hips/hip-0006.md\n\nNote: OCI support remains experimental, and you are still required to set HELM_EXPERIMENTAL_OCI=1 in your environment.\n\n**What this PR does / why we need it**:\n\nThis PR implements the changes outlined in HIP 6. This will provide a more stable foundation for Helm's OCI registry support.\n\nThis adds a new `helm push` subcommand and `Pusher` interface for uploading charts, which can be expanded to other non-OCI protocols and various uploader plugins in the future (equivalent of `helm pull` subcommand and `Getter` interface).\n\nThis removes the experimental `helm chart` subcommand, and any notion of an OCI cache has been removed for the time being. The `helm registry` subcommand remains as is.\n\nThroughout the rest of the code, OCI-based charts are now supported. 
For example, in `helm install`:\n```\nhelm install oci://example.com/charts/nginx --version 0.1.0\n```\n\nSupport for proven", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Good point.\n\nIt's a bit of an interesting situation... We exceedingly marked it as experimental, though it somehow went through code review and ended up in a public package.\n\n[Our compatibility contract](https://github.com/helm/helm/blob/main/CONTRIBUTING.md#semantic-versioning) states that changes SHOULD remain backward compatible. The keyword SHOULD was carefully considered for cases like this. In this particular case (an experimental flag), I think we can make the exception to remove it entirely. Had the experiment failed and we had to remove the experiment from the Helm project, we would've had to do the same thing anyways. /shrug\n\nI'd be comfortable with removing it entirely this time. But we should be more strict about putting experimental APIs in `internal` moving forward.\n\nI'll let other maintainers chime in here and see what they think - I'm sure community members relying on these APIs may have opinions as well, so I'd ask around.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "helm install oci://example.com/charts/nginx --version 0.1.0", + "//:", + "? Or just let it go?\n@jdolitsky I'm not ignoring your question. I was hoping a maintainer would chime in. I did raise this at the last helm meeting and they noted it was worth more thought.\nGood point.\r\n\r\nIt's a bit of an interesting situation... We exceedingly marked it as experimental, though it somehow went through code review and ended up in a public package.\r\n\r\n[Our compatibility contract](https://github.com/helm/helm/blob/main/CONTRIBUTING.md#semantic-versioning) states that changes SHOULD remain backward compatible. The keyword SHOULD was carefully considered for cases like this. 
In this particular case (an experimental flag), I think we can make the exception to remove it entirely. Had the experiment failed and we had to remove the experiment from the Helm project, we would've had to do the same thing anyways. /shrug\r\n\r\nI'd be comfortable with removing it entirely this time. But we should be more strict about putting experimental APIs in `internal` moving forward.\r\n\r\nI'll let other maintainers chime in here and see what they think - I'm sure community members relying on these APIs may have opinions as well, so I'd ask around.\nAs far as [HIP 4](https://github.com/helm/community/pull/145) is concerned:\r\n\r\n> * Experimental features are not required to ensure backward compatibility for their feature set. (They cannot, however, break backward compatibility for other parts of Helm.) Thus, a release new release of an existing experimental feature may break APIs, change its chart representations, or modify its command-line flags as long as it does not break the compatibility of non-experimental features.\r\n\r\nFollowing that guideline, new releases of experimental features may break backwards compatibility.\nLooking at the files added to public package (`/pkg`), they are experimental and can be removed cleanly without affecting the other supported APIs. It is unfortunate that they ended up in the public package but it doesn't break backwards compatibility per se. 
I would have no objection removing them.\nRunning `helm dep update` gives an error but still succeeds" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "size-xxl" + ], + "category": "workloads", + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [ + "Namespace" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/helm/helm/pull/9782", + "sourceRepo": "helm/helm", + "reactions": 22, + "comments": 40 + }, + "security": { + "scannedAt": "2026-02-27T17:47:40.433Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-9957-feat-pkg-engine-expose-subcharts-scope-in-parent.json b/solutions/cncf-generated/helm/helm-9957-feat-pkg-engine-expose-subcharts-scope-in-parent.json new file mode 100644 index 00000000..b5550cc1 --- /dev/null +++ b/solutions/cncf-generated/helm/helm-9957-feat-pkg-engine-expose-subcharts-scope-in-parent.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:28.650Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "helm: feat(pkg/engine): expose subcharts scope in parent", + "description": "**What this PR does / why we need it**:\nIt expose the scope (.Values, .Charts, .Releases etc.) of subcharts to the parent in `.Subcharts.`.\nThis is a proposal to fix #3920.\n\nWith a chart containing a sub-chart named \"foo\" and defining a template \"bar.fullname\", you could use this template in the parent as follows : `{{ template \"bar.fullname\" .Subcharts.foo }}`. 
The template would be rendered with the same scope/context as the subchart.\n\nrefs #3920\n\n**Special notes for your reviewer**:\n\n**If applicable**:\n- [ ] this PR contains documentation\n- [X] this PR contains unit tests\n- [ ] this PR has been tested for backwards compatibility", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This is a regression accidently introduced in #9957.\n\nA delete call had been used on the Template key of vals. This caused\na condition where Template was not available when rendering via tpl.\nThe delete happened after ExecuteTemplate so the issue is surpsising.\nIt may possibly be a race condition. Existing tests did not catch it.\nI tried to create a test that directly tested the issue and was\nunable to replicate the error seen with real charts. This leads me\nto believe it is a race condition in the underlying Go template\npackage.\n\nThe delete call was not there before #9957. It should be safe to\nremove and keep that information.\n\nCloses #10082\n\n**What this PR does / why we need it**:\n\nA regression was accidentally introduced. This fixes the regression.\n\n**Special notes for your reviewer**:\n\nThe lack of tests is because this is difficult to test. Existing tests should have caught the issue and new tests failed to catch it. My current best guess is that this is due to a race condition somewhere in the Go template package.\n\n**If applicable**:\n- [ ] this PR contains documentation\n- [ ] this PR contains unit tests\n- [x] this PR has been tested for backwards compatibility", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "{{- define \"subchart.foo\" -}}\r\n- {{ .Parent }}\r\n- {{ .Values.val1 }}\r\n- {{ .Values.val2 }}\r\n- ...\r\n{{- end -}}", + "{{- $helperScope = merge (dict \"Parent\" $something) . 
-}}\r\n{{- include \"subchart.foo\" $helperScope -}}", + "env:\r\n- name: POSTGRES_HOST\r\n value: {{ .Values.postgres_host | default (include \".Subcharts.postgresql.postgresql.primary.fullname\" . ) }}" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "size-s" + ], + "category": "workloads", + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/helm/helm/pull/9957", + "sourceRepo": "helm/helm", + "reactions": 28, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:47:28.650Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/holmesgpt/holmesgpt-1035-feat-add-experimental-ag-ui-supported-chat-endpoint-and-ppl-query.json b/solutions/cncf-generated/holmesgpt/holmesgpt-1035-feat-add-experimental-ag-ui-supported-chat-endpoint-and-ppl-query.json new file mode 100644 index 00000000..c994ae6b --- /dev/null +++ b/solutions/cncf-generated/holmesgpt/holmesgpt-1035-feat-add-experimental-ag-ui-supported-chat-endpoint-and-ppl-query.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:43.281Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "holmesgpt: feat: Add experimental AG-UI supported chat endpoint and PPL query assist ", + "description": "## Summary\nThis PR introduces an experimental [AG-UI](https://docs.ag-ui.com/introduction) chat server for HolmesGPT. The primary use-case is to support AI-powered data exploration and root-cause-analysis capabilities directly within observability platforms like [OpenSearch Dashboards](https://github.com/opensearch-project/OpenSearch-Dashboards). It also adds experimental support for query assist with OpenSearch [Piped Processing Language](https://docs.opensearch.org/latest/search-plugins/sql/ppl/index/) (PPL). 
\n\nResolves https://github.com/robusta-dev/holmesgpt/issues/889 \n\n![ExampleOps demo video](https://github.com/kylehounslow/holmesgpt/blob/docs/experimental/ag-ui/docs/holmesgpt-agui-demo-1.gif?raw=true) \n\n![video source](https://github.com/user-attachments/assets/49eee0a3-f50f-4e5f-bc18-2afaca8a9003)\n\n## Why is this change necessary? \nPlatforms like [OpenSearch Dashboards](https://github.com/opensearch-project/OpenSearch-Dashboards) are integrating AI-powered data exploration and", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@kylehounslow can you please fix the poetry lock issue, so we can merge it?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "poetry run pytest tests/plugins/toolsets/opensearch/test_opensearch_query_assist.py", + "cd experimental/ag-ui\r\npoetry run python server.py", + "cd experimental/ag-ui/front-end \r\nyarn install \r\nyarn start" + ] + } + }, + "metadata": { + "tags": [ + "holmesgpt", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "holmesgpt" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/HolmesGPT/holmesgpt/pull/1035", + "sourceRepo": "HolmesGPT/holmesgpt", + "reactions": 1, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:48:43.281Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/holmesgpt/holmesgpt-459-feat-cache-toolset-status-and-add-toolset-management-tool-command.json b/solutions/cncf-generated/holmesgpt/holmesgpt-459-feat-cache-toolset-status-and-add-toolset-management-tool-command.json new file mode 100644 index 00000000..a7dbfedc --- /dev/null +++ b/solutions/cncf-generated/holmesgpt/holmesgpt-459-feat-cache-toolset-status-and-add-toolset-management-tool-command.json @@ -0,0 +1,56 @@ +{ + "format": "kc-mission-v1", + 
"exportedAt": "2026-02-27T17:48:44.413Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "holmesgpt: feat: cache toolset status and add toolset management tool command", + "description": "fix: #426\nfix: #424\n\n- use isort to sort the imported packaged\n- introduce toolset_manager to manage toolsets, which was Config's job.\n- use load_toolsets_config as the only entrypoint to load initialize and validate a toolset from a config/definition\n- Besides the `mcp` toolset type, this PR also introduces `built-in` and `customized` toolset type to differentiate the source of these toolsets\n- introduce cli command to list and refresh the toolset status from local cache\n\n```\n ./dist/holmes/holmes toolset --help\n\n Usage: holmes toolset [OPTIONS] COMMAND [ARGS]...\n\n toolset management commands\n\n╭─ Options ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮\n│ --help Show this message and exit. │\n╰─────────────────────────────────────────────────────────────────────────────────────────", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Had call with @moshemorad, the notes from the call\n\n```\nprint toolset is load from cache during ask command\nfix the toolset table issue\nupdate the custom-toolset flag description to the toolset is for experimental and not made permanent\nadd doc to explain the customer-facing changes.\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "./dist/holmes/holmes toolset --help\r\n\r\n Usage: holmes toolset [OPTIONS] COMMAND [ARGS]...\r\n\r\n toolset management commands\r\n\r\n╭─ Options ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮\r\n│ --help Show this message and exit. 
│\r\n╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯\r\n╭─ Commands ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮\r\n│ list List build-int and custom toolsets status of CLI │\r\n│ refresh Refresh build-in and custom toolsets status of CLI │\r\n╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯", + "./dist/holmes/holmes toolset list\r\n name status enabled type path error\r\n-------------------------------- -------- --------- -------- ----------------------------------- ------------------------------------------------------------------\r\nconfluence failed True built-in Environment variable CONFLUENCE_BASE_URL was not set\r\ndocker/core enabled True built-in\r\nhelm/core enabled True built-in\r\nkubernetes/logs enabled True built-in\r\nservicenow disabled False built-in\r\naks/node-health enabled True built-in\r\naws/security failed True built-in `aws sts get-caller-identity` returned 127\r\naws/rds failed True built-in `aws sts get-caller-identity` returned 127\r\nargocd/core failed True built-in `argocd version` returned 127\r\nslab failed True built-in Environment variable SLAB_API_KEY was not set\r\naks/core enabled True built-in\r\nkubernetes/core enabled True built-in\r\nkubernetes/live-metrics enabled True built-in\r\nkubernetes/kube-prometheus-stack enabled True built-in\r\nkubernetes/krew-extras failed True built-in `kubectl version --client && kubectl lineage --version` returned 1\r\nkubernetes/kube-lineage-extras failed True built-in `kubectl version --client && kube-lineage --version` returned 127\r\ninternet enabled True built-in\r\nrobusta failed True built-in The data access layer is not available\r\nopensearch/status disabled False 
built-in\r\ngrafana/loki disabled False built-in\r\ngrafana/tempo disabled False built-in\r\nnewrelic failed True built-in No configuration provided\r\ngrafana/grafana disabled False built-in\r\nnotion enabled True built-in\r\nkafka/admin failed True built-in The toolset is missing its configuration\r\ndatadog failed True built-in\r\nprometheus/metrics enabled True built-in\r\ndatetime enabled True built-in\r\nopensearch/logs failed True built-in Missing opensearch traces URL. Check your config\r\nopensearch/traces failed True built-in Missing opensearch traces URL. Check your config\r\ncoralogix/logs failed True built-in The toolset is missing its configuration\r\nrabbitmq/core failed True built-in RabbitMQ toolset is misconfigured. 'management_url' is required.\r\ngit failed True built-in Missing one or more required Git configuration values.\r\nig/core enabled True custom /home/azureuser/inspect_gadget.yaml", + "./dist/holmes/holmes ask \"detect why the k8s pod client under namespace test-ns cannot resolve dns\" -f /home/azureuser/llm/demo/dns_troubleshooting_instructions.md --max-steps 20 -t ~/inspect_gadget.yaml\r\nUser: detect why the k8s pod client under namespace test-ns cannot resolve dns\r\nLoading file /home/azureuser/llm/demo/dns_troubleshooting_instructions.md\r\nRunning tool kubectl_find_resource: kubectl get -A --show-labels -o wide pod | grep client tools.py:125\r\nRunning tool kubectl_describe: kubectl describe pod client -n test-ns tools.py:125\r\nRunning tool kubectl_get_by_kind_in_namespace: kubectl get --show-labels -o wide pod -n kube-system tools.py:125\r\nRunning tool kubectl_get_by_kind_in_namespace: kubectl get --show-labels -o wide svc -n kube-system tools.py:125\r\nRunning tool kubectl_describe: kubectl describe pod coredns-57d886c994-8h9gt -n kube-system tools.py:125\r\nRunning tool kubectl_describe: kubectl describe pod coredns-57d886c994-vqmrz -n kube-system tools.py:125\r\nRunning tool kubectl_get_by_name: kubectl get --show-labels 
-o wide svc kube-dns -n kube-system tools.py:125\r\nRunning tool kubectl_get_yaml: kubectl get -o yaml pod client -n test-ns tools.py:125\r\nRunning tool kubectl_get_yaml: kubectl get -o yaml pod coredns-57d886c994-8h9gt -n kube-system tools.py:125\r\nRunning tool kubectl_get_yaml: kubectl get -o yaml pod coredns-57d886c994-vqmrz -n kube-system tools.py:125\r\nRunning tool kubectl_get_yaml: kubectl get -o yaml svc kube-dns -n kube-system tools.py:125\r\nRunning tool kubectl_get_yaml: kubectl get -o yaml configmap coredns -n kube-system tools.py:125\r\nRunning tool kubectl_get_yaml: kubectl get -o yaml configmap coredns-custom -n kube-system tools.py:125\r\nRunning tool kubectl_get_by_kind_in_namespace: kubectl get --show-labels -o wide networkpolicy -n test-ns tools.py:125\r\nRunning tool kubectl_get_yaml: kubectl get -o yaml networkpolicy default-deny-egress -n test-ns tools.py:125\r\ntool_calling_llm.call - completed in 9 iterations - 27071ms performance_timing.py:41\r\nAI: DNS resolution fails for pod client in namespace test-ns because a default-deny-egress NetworkPolicy is present. This policy blocks all outbound traffic,\r\nincluding DNS (UDP/TCP port 53) to the CoreDNS service at 10.0.0.10.\r\n\r\nKey findings:\r\n\r\n • CoreDNS pods are healthy and running.\r\n • CoreDNS service is correctly configured.\r\n • Pod spec uses dnsPolicy: ClusterFirst (default, correct).\r\n • NetworkPolicy default-deny-egress in test-ns blocks all egress by default.\r\n\r\nTo resolve, update the NetworkPolicy to allow egress to 10.0.0.10 on port 53/UDP and 53/TCP.\r\n\r\nSee the official Kubernetes DNS debugging guide for more: https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/ (section:\r\n\"Network policies blocking DNS\")." 
+ ] + } + }, + "metadata": { + "tags": [ + "holmesgpt", + "sandbox", + "app-definition", + "enhancement" + ], + "category": "workloads", + "cncfProjects": [ + "holmesgpt" + ], + "targetResourceKinds": [ + "Pod", + "Service", + "Configmap", + "Job", + "Namespace", + "Networkpolicy", + "Node" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/HolmesGPT/holmesgpt/pull/459", + "sourceRepo": "HolmesGPT/holmesgpt", + "reactions": 1, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:48:44.413Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/in-toto/in-toto-143-add-user-settings-module.json b/solutions/cncf-generated/in-toto/in-toto-143-add-user-settings-module.json new file mode 100644 index 00000000..d37a0c50 --- /dev/null +++ b/solutions/cncf-generated/in-toto/in-toto-143-add-user-settings-module.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:45.956Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "in-toto: Add user settings module", + "description": "The newly added module provides methods to read environment variables and rcfiles and to write them to the `in_toto.settings` module, i.e. override them (fix #137).\n\nThe PR also adds a call of the settings parsing and overriding method to in-toto's `__init__.py`, which should be the first thing that's executed when using in_toto. \nAny code that accesses `in_toto.settings` thereafter receives the values defined by the user.\n\nMore information about how the user sets the settings can be found in the [header docstring of `user_settings.py`](https://github.com/in-toto/in-toto/blob/4390aeae19fd9e8cee69790acd76510298c812ec/in_toto/user_settings.py#L15-L48).", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@vladimir-v-diaz, thanks for you comments. 
I tried to address them accordingly. Let me know what you think.\n\nUpdate: It seems that 2ea0e5d broke it, because `ConfigParser` converts names to lower case. I'll fix this on Monday. You can review the rest if you want though.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "in-toto", + "graduated", + "security" + ], + "category": "security", + "cncfProjects": [ + "in-toto" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/in-toto/in-toto/pull/143", + "sourceRepo": "in-toto/in-toto", + "reactions": 0, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:47:45.956Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/in-toto/in-toto-48-13-use-validators-for-the-models.json b/solutions/cncf-generated/in-toto/in-toto-48-13-use-validators-for-the-models.json new file mode 100644 index 00000000..8d801cb4 --- /dev/null +++ b/solutions/cncf-generated/in-toto/in-toto-48-13-use-validators-for-the-models.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:45.087Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "in-toto: 13 use validators for the models", + "description": "This patch series refactors code in the models and adds the validate function to ensure the properties of a model are populated properly. 
I also added unit tests for these methods.\n\ncloses #13", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "cdc03038575eb7742dcb21ec5fea62fc955ccf2b should not be part of this PR", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "in-toto", + "graduated", + "security", + "enhancement" + ], + "category": "security", + "cncfProjects": [ + "in-toto" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/in-toto/in-toto/pull/48", + "sourceRepo": "in-toto/in-toto", + "reactions": 0, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:47:45.087Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/in-toto/in-toto-95-add-in-toto-mock.json b/solutions/cncf-generated/in-toto/in-toto-95-add-in-toto-mock.json new file mode 100644 index 00000000..2e58694d --- /dev/null +++ b/solutions/cncf-generated/in-toto/in-toto-95-add-in-toto-mock.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:44.207Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "in-toto: Add in_toto_mock", + "description": "* **What does this PR do?**\nAdds in_toto_mock command, which is a stripped down version of in_toto_run.\n\n* **Are there points in the code the reviewer needs to double check?**\nNo, its basically a rip off of [in-toto-run](https://github.com/in-toto/in-toto/blob/develop/in_toto/in_toto_run.py). 
Opened both files in [Meld](http://meldmerge.org/) and copy pasted shamelessly :stuck_out_tongue: \n\n* **Who do you think should review this PR?**\n@lukpueh \n\n* **Does this PR meet the acceptance criteria?**\n\n - Documentation created/updated (e.g., did you update the docstrings?)\n * README.md requires editing.\n\n - All builds are passing (did you run the tests locally?)\n * Yes\n\n - Code follows the [style guide](https://github.com/secure-systems-lab/code-style-guidelines)\n * Followed manually. Is there some code formatter you are using ? like [yapf](https://github.com/google/yapf).\n\n* **What are the relevant issue numbers fixed (add one line for each)?**\nFixed #93 \n\n* **Steps Requ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Awesome PR description 👍", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "in-toto", + "graduated", + "security" + ], + "category": "security", + "cncfProjects": [ + "in-toto" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/in-toto/in-toto/pull/95", + "sourceRepo": "in-toto/in-toto", + "reactions": 1, + "comments": 26 + }, + "security": { + "scannedAt": "2026-02-27T17:47:44.207Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/inspektor-gadget/inspektor-gadget-1396-making-gadgets-self-contained-adding-metadata-operators-in.json b/solutions/cncf-generated/inspektor-gadget/inspektor-gadget-1396-making-gadgets-self-contained-adding-metadata-operators-in.json new file mode 100644 index 00000000..74e4896a --- /dev/null +++ b/solutions/cncf-generated/inspektor-gadget/inspektor-gadget-1396-making-gadgets-self-contained-adding-metadata-operators-in.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:52.536Z", + "exportedBy": 
"cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "inspektor-gadget: Making gadgets self-contained, adding metadata, operators, interfaces and more: Part 2", + "description": "This is the second PR of this series, the first one was #1281.\n\nThis part focuses on the communication between `kubectl-gadget` and `gadgettracermanager`. Communication has been replaced with gRPC, so that the whole lifecycle of a gadget on each node is now handled by a connection to each node rather than a CR. In most cases this will simplify things, as there are no longer stale traces, response times should be lower and in general the workflow is a lot simpler and less error prone.\n\nCRs will return in a future PR as they're still very useful for certain use-cases (e.g. long-lived or shared traces, metric collection).\n\nThere are still a couple of TODOs:\n\n* [x] a resolver (ip to pod/service names) that was previously used for trace/network needs to be replaced by an operator (that then can be re-used for other gadgets as well) -> https://github.com/inspektor-gadget/inspektor-gadget/pull/1413\n* [x] buildflags for non-linux binaries need to be adjusted\n\nMore documentation will follow, es", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This PR implements a basic support for bpftrace. 
\n\n```bash\n$ kubectl gadget bpftrace -e 'tracepoint:syscalls:sys_enter_openat { printf(\"%s %s\\n\", comm, str(args->filename)); }'\nNODE OUTPUT\nminikube-m02 Attaching 1 probe...\nminikube-m02 bpftrace /sys/devices/system/cpu/online\nminikube-m02 bpftrace /sys/devices/system/cpu/online\nminikube-m03 Attaching 1 probe...\nminikube-m02 bpftrace /dev/null\nminikube-m02 bpftrace /sys/kernel/debug/tracing/events/syscalls/sys_enter_openat/id\nminikube-m02 bpftrace /sys/devices/system/cpu/online\nminikube-m03 bpftrace /sys/devices/system/cpu/online\nminikube-m03 bpftrace /sys/devices/system/cpu/online\nminikube-m02 bpftrace /sys/devices/system/cpu/online\nminikube Attaching 1 probe...\nminikube-m02 bpftrace /dev/null\nminikube-m02 bpftrace /sys/kernel/debug/tracing/events/syscalls/sys_enter_openat/id\nminikube-m02 runc /usr/bin/runc\nminikube-m02 runc /proc/sys/kernel/cap_last_cap\nminikube runc /proc/sys/kernel/cap_last_cap\nminikube-m03 runc /proc/sys/kernel/cap_last_cap\nminikube-m02 runc\n...\n```\n\nTODO: (for future PRs?)\n- [ ] Support arbitrary parameters, i.e, mak", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "$ kubectl gadget bpftrace -e 'tracepoint:syscalls:sys_enter_openat { printf(\"%s %s\\n\", comm, str(args->filename)); }'\r\nNODE OUTPUT\r\nminikube-m02 Attaching 1 probe...\r\nminikube-m02 bpftrace /sys/devices/system/cpu/online\r\nminikube-m02 bpftrace /sys/devices/system/cpu/online\r\nminikube-m03 Attaching 1 probe...\r\nminikube-m02 bpftrace /dev/null\r\nminikube-m02 bpftrace /sys/kernel/debug/tracing/events/syscalls/sys_enter_openat/id\r\nminikube-m02 bpftrace /sys/devices/system/cpu/online\r\nminikube-m03 bpftrace /sys/devices/system/cpu/online\r\nminikube-m03 bpftrace /sys/devices/system/cpu/online\r\nminikube-m02 bpftrace /sys/devices/system/cpu/online\r\nminikube Attaching 1 probe...\r\nminikube-m02 bpftrace /dev/null\r\nminikube-m02 bpftrace 
/sys/kernel/debug/tracing/events/syscalls/sys_enter_openat/id\r\nminikube-m02 runc /usr/bin/runc\r\nminikube-m02 runc /proc/sys/kernel/cap_last_cap\r\nminikube runc /proc/sys/kernel/cap_last_cap\r\nminikube-m03 runc /proc/sys/kernel/cap_last_cap\r\nminikube-m02 runc\r\n...", + "> \"node1\": {histogram in JSON format}\r\n> \"node2\": {histogram in JSON format}\r\n>", + "> [{socket1 of node1}, {socket2 of node1}]\r\n> [{socket1 of node2}, {socket2 of node2}]\r\n>" + ] + } + }, + "metadata": { + "tags": [ + "inspektor-gadget", + "sandbox", + "observability" + ], + "category": "observability", + "cncfProjects": [ + "inspektor-gadget" + ], + "targetResourceKinds": [ + "Pod", + "Service", + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/inspektor-gadget/inspektor-gadget/pull/1396", + "sourceRepo": "inspektor-gadget/inspektor-gadget", + "reactions": 2, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:48:52.536Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/inspektor-gadget/inspektor-gadget-3659-gadgets-add-qdisc-latency-tracer.json b/solutions/cncf-generated/inspektor-gadget/inspektor-gadget-3659-gadgets-add-qdisc-latency-tracer.json new file mode 100644 index 00000000..5570476d --- /dev/null +++ b/solutions/cncf-generated/inspektor-gadget/inspektor-gadget-3659-gadgets-add-qdisc-latency-tracer.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:50.742Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "inspektor-gadget: gadgets: add qdisc_latency tracer", + "description": "# Introduce qdisc_latency profiler\n\nIn order to analyse network related issues, the qdisc_latency profiler tool has been added. It will visualise the amount a network packet spend in the network scheduler in an histogram. 
\n\n**Note:** The profiler requires at least kernel 5.14, as the tracepoints used were introduced in that kernel version. \n\n## How to use\n\nRun the new profiler via `ig profile qdisc-latency`. \n\n## Testing done\n\nI tried running the gadget while performing various network tasks. One strange thing I could not get figured out though was, that I never managed to get a packet queued in my qdisc. Maybe somebody has an idea here? \n\nFix #3118", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "See https://github.com/inspektor-gadget/inspektor-gadget/pull/3659#issuecomment-2671299658\n\n> error getting package metadata (path: /tmp/artifact-hub1557883500/gadgets/profile_qdisc_latency): error validating package metadata file: 1 error occurred:\n> invalid metadata: description not provided\n\nFixes: 76d79825e440 (\"gadgets: add qdisc_latency tracer\") #3659\n\ncc @patrickpichler", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "tc qdisc add dev eth0 root netem delay 100ms" + ] + } + }, + "metadata": { + "tags": [ + "inspektor-gadget", + "sandbox", + "observability" + ], + "category": "observability", + "cncfProjects": [ + "inspektor-gadget" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/inspektor-gadget/inspektor-gadget/pull/3659", + "sourceRepo": "inspektor-gadget/inspektor-gadget", + "reactions": 3, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:48:50.742Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/istio/istio-14578-thrift-support.json b/solutions/cncf-generated/istio/istio-14578-thrift-support.json new file mode 100644 index 00000000..3a7dbf44 --- /dev/null +++ b/solutions/cncf-generated/istio/istio-14578-thrift-support.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": 
"2026-02-27T17:47:53.213Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "istio: Thrift Support", + "description": "Adds Thrift support to control plane components.\n\nFixes https://github.com/istio/istio/issues/12845", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Hi @pnovotnak. Thanks for your PR.\n\nI'm waiting for a [istio](https://github.com/orgs/istio/people) member to verify that this patch is reasonable to test. If it is, they should reply with `/ok-to-test` on its own line. Until that is done, I will not automatically test new commits in this PR, but the usual testing commands by org members will still work. Regular contributors should [join the org](https://github.com/orgs/istio/people) to skip this step.\n\nOnce the patch is verified, the new status will be reflected by the `ok-to-test` label.\n\nI understand the commands that are listed [here](https://go.k8s.io/bot-commands).\n\n
\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n
", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "istio", + "graduated", + "networking", + "needs-rebase", + "ok-to-test", + "size-xxl", + "cla--yes" + ], + "category": "networking", + "cncfProjects": [ + "istio" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/istio/istio/pull/14578", + "sourceRepo": "istio/istio", + "reactions": 6, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:47:53.213Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/istio/istio-16865-fix-istioctl-release-binary-corruption.json b/solutions/cncf-generated/istio/istio-16865-fix-istioctl-release-binary-corruption.json new file mode 100644 index 00000000..01eb51f4 --- /dev/null +++ b/solutions/cncf-generated/istio/istio-16865-fix-istioctl-release-binary-corruption.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:50.796Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "istio: Fix istioctl release binary corruption", + "description": "The release acrhive scripts invoke sed to update versions fields in\nseveral charts. This search/replace ran over the entire release\narchive included istioctl binaries. The istioctl operator commands\nwhich were introduced in\nhttps://github.com/istio/istio/pull/16622/files include embedded\ncharts which matched the sed expression. 
This corrupted the data\nportion istioctl binaries when created through the official release process.\n\nfixes https://github.com/istio/istio/issues/16660 by reducing scope of\nsed to the install/kubernetes/helm subdirectory.\n\nPlease provide a description for what this PR is for.\n\nAnd to help us figure out who should review this PR, please \nput an X in all the areas that this PR affects.\n\n[ ] Configuration Infrastructure\n[ ] Docs\n[ ] Installation\n[ ] Networking\n[ ] Performance and Scalability\n[ ] Policies and Telemetry\n[ ] Security\n[ ] Test and Release\n[ ] User Experience\n[ ] Developer Infrastructure", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This is an automated cherry-pick of #16865\n\nFixes #16660", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "istio", + "graduated", + "networking", + "size-xs", + "cla--yes" + ], + "category": "networking", + "cncfProjects": [ + "istio" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/istio/istio/pull/16865", + "sourceRepo": "istio/istio", + "reactions": 6, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:47:50.796Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/istio/istio-17427-add-all-securitycontext-fields-in-injected-containers.json b/solutions/cncf-generated/istio/istio-17427-add-all-securitycontext-fields-in-injected-containers.json new file mode 100644 index 00000000..2e837670 --- /dev/null +++ b/solutions/cncf-generated/istio/istio-17427-add-all-securitycontext-fields-in-injected-containers.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:51.826Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "istio: Add all 
securityContext fields in injected containers", + "description": "Fixes https://github.com/istio/istio/issues/17318\n\n[x] Configuration Infrastructure\n[ ] Docs\n[ ] Installation\n[ ] Networking\n[ ] Performance and Scalability\n[ ] Policies and Telemetry\n[ ] Security\n[ ] Test and Release\n[ ] User Experience\n[ ] Developer Infrastructure", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "lgtm, mention me once its not WIP if you need an approval", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "securityContext:\r\n capabilities:\r\n add:\r\n - NET_ADMIN\r\n drop:\r\n - ALL", + "securityContext:\r\n capabilities:\r\n add:\r\n - NET_ADMIN\r\n - NET_RAW\r\n drop:\r\n - ALL" + ] + } + }, + "metadata": { + "tags": [ + "istio", + "graduated", + "networking", + "area-config", + "size-xxl", + "cla--yes" + ], + "category": "networking", + "cncfProjects": [ + "istio" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/istio/istio/pull/17427", + "sourceRepo": "istio/istio", + "reactions": 6, + "comments": 26 + }, + "security": { + "scannedAt": "2026-02-27T17:47:51.826Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/istio/istio-28261-wait-until-ack-before-sending-additional-pushes.json b/solutions/cncf-generated/istio/istio-28261-wait-until-ack-before-sending-additional-pushes.json new file mode 100644 index 00000000..67ec23f8 --- /dev/null +++ b/solutions/cncf-generated/istio/istio-28261-wait-until-ack-before-sending-additional-pushes.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:57.768Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "istio: Wait until ACK before sending additional pushes", + "description": "Fixes: #25685\n\nAt 
large scale, Envoy suffers from overload of XDS pushes, and there is\nno backpressure in the system. Other control planes, such as any based\non go-control-plane, outperform Istio in config update propogations\nunder load as a result.\n\nThis changes adds a backpressure mechanism to ensure we do not push more\nconfigs than Envoy can handle. By slowing down the pushes, the\npropogation time of new configurations actually increases. We do this by\nkeeping note, but not sending, any push requests where that TypeUrl has\nan un-ACKed request in flight. When we get an ACK, if there is a pending\npush request we will immediately trigger it. This effectively means that\nin a high churn environment, each proxy will always have exactly 1\noutstanding push per type, and when the ACK is recieved we will\nimmediately send a new update.\n\nThis graph shows the time between sending a VirtualService and the route being active in Envoy. X axis is number of virtual services, Y axis is ms to get ready", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Skipping CI for Draft Pull Request.\nIf you want CI signal for your change, please convert it to an actual PR.\nYou can still manually trigger a test run with `/test all`", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "istio", + "graduated", + "networking", + "size-l", + "cla--yes" + ], + "category": "networking", + "cncfProjects": [ + "istio" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/istio/istio/pull/28261", + "sourceRepo": "istio/istio", + "reactions": 4, + "comments": 17 + }, + "security": { + "scannedAt": "2026-02-27T17:47:57.768Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/istio/istio-35641-feature-wildcard-matching.json 
b/solutions/cncf-generated/istio/istio-35641-feature-wildcard-matching.json new file mode 100644 index 00000000..70cbdb50 --- /dev/null +++ b/solutions/cncf-generated/istio/istio-35641-feature-wildcard-matching.json @@ -0,0 +1,53 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:54.762Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "istio: Feature/wildcard matching", + "description": "**Please provide a description of this PR:**\n\nAdd wildcard path matching. This pr makes possible to write authz. paths like:\n\n- \"/test/*/resource\" \n- \"/test/\\*/resource/\\*/subResource\"\n\nResolves: https://github.com/istio/istio/issues/16585\n\nI've test it locally and it works fine.\n\nHere is the AuthorizationPolicy I wrote:\n\n```\napiVersion: v1\nitems:\n- apiVersion: security.istio.io/v1beta1\n kind: AuthorizationPolicy\n metadata:\n name: test-auth-policy\n spec:\n action: DENY\n rules:\n - to:\n - operation:\n methods:\n - GET\n paths:\n - \"/test/*/example\"\n - \"/test/*/example/*/subpath\"\n when:\n - key: request.headers[test]\n notValues:\n - example\n - to:\n - operation:\n methods:\n - POST\n paths:\n - \"/test/*/example\"\n when:\n - key: request.headers[test]\n notValues:\n - example\n selector:\n matchLabels:\n app: nginx\nkind: List\nmetadata:\n r", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Add StringMatcher options to Path and Condition in AuthorizationPolicy.\nWe use the same StringMatcher as used in VirtualHosts to provide consistent matching in both.\nIf both String and StringMatcher are present, they should be appended.\n\nFixes https://github.com/istio/istio/issues/16585\nRelated: https://github.com/istio/istio/pull/35641\n\n@howardjohn wdyt? 
this way we can prevent introducing breaking changes in the exiting APIs but still support matching by regex as needed.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "apiVersion: v1\nitems:\n- apiVersion: security.istio.io/v1beta1\n kind: AuthorizationPolicy\n metadata:\n name: test-auth-policy\n spec:\n action: DENY\n rules:\n - to:\n - operation:\n methods:\n - GET\n paths:\n - \"/test/*/example\"\n - \"/test/*/example/*/subpath\"\n when:\n - key: request.headers[test]\n notValues:\n - example\n - to:\n - operation:\n methods:\n - POST\n paths:\n - \"/test/*/example\"\n when:\n - key: request.headers[test]\n notValues:\n - example\n selector:\n matchLabels:\n app: nginx\nkind: List\nmetadata:\n resourceVersion: \"\"\n selfLink: \"\"" + ] + } + }, + "metadata": { + "tags": [ + "istio", + "graduated", + "networking", + "area-networking", + "area-security", + "area-user-experience", + "size-m", + "cla--yes", + "needs-ok-to-test", + "lifecycle-staleproof", + "lifecycle-automatically-closed" + ], + "category": "networking", + "cncfProjects": [ + "istio" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/istio/istio/pull/35641", + "sourceRepo": "istio/istio", + "reactions": 5, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:47:54.762Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/istio/istio-46603-feat-gateway-api-customize-gateway-deployments-via-class-annotations.json b/solutions/cncf-generated/istio/istio-46603-feat-gateway-api-customize-gateway-deployments-via-class-annotations.json new file mode 100644 index 00000000..d5e1c44f --- /dev/null +++ b/solutions/cncf-generated/istio/istio-46603-feat-gateway-api-customize-gateway-deployments-via-class-annotations.json @@ -0,0 +1,55 @@ +{ + "format": "kc-mission-v1", + "exportedAt": 
"2026-02-27T17:47:58.771Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "istio: feat(gateway-api): customize gateway deployments via class annotations", + "description": "Closes #46594\n\nThis PR enhances custom `GatewayClass`'es and allows a per-class customization. Currently we have templates and service types hardcoded for each controller type instead.\n\n* Support `inject.istio.io/templates` annotation on `GatewayClass` to pick a custom template from the istiod ConfigMap.\n * Uses controller's default if unset.\n * Supports just a single template for now. No template chaining like for sidecars.\n* Support `networking.istio.io/service-type` annotation on `GatewayClass` to override controller's default. This annotation is already supported for `Gateway` to perform a per-gateway override.\n * Uses controller's default if unset.", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "Hi @svrakitin. Thanks for your PR.\n\nI'm waiting for a [istio](https://github.com/orgs/istio/people) member to verify that this patch is reasonable to test. If it is, they should reply with `/ok-to-test` on its own line. Until that is done, I will not automatically test new commits in this PR, but the usual testing commands by org members will still work. Regular contributors should [join the org](https://github.com/orgs/istio/people) to skip this step.\n\nOnce the patch is verified, the new status will be reflected by the `ok-to-test` label.\n\nI understand the commands that are listed [here](https://go.k8s.io/bot-commands?repo=istio%2Fistio).\n\n
\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n
", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "networking.istio.io/service-type: |\r\n ClusterIP\r\n ---\r\n kind: ClusterRole\r\n name: give-everyone-admin-permission\r\n ---", + "gateway.istio.io/pod-template: |\r\n spec:\r\n nodeSelector: {}\r\n affinity: {}\r\n tolerations: []" + ] + } + }, + "metadata": { + "tags": [ + "istio", + "graduated", + "networking", + "ok-to-test", + "size-l", + "lifecycle-stale", + "lifecycle-automatically-closed", + "release-notes-none" + ], + "category": "networking", + "cncfProjects": [ + "istio" + ], + "targetResourceKinds": [ + "Deployment", + "Service", + "Configmap" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/istio/istio/pull/46603", + "sourceRepo": "istio/istio", + "reactions": 3, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:47:58.771Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/istio/istio-55283-implement-gateway-waypoint-customization.json b/solutions/cncf-generated/istio/istio-55283-implement-gateway-waypoint-customization.json new file mode 100644 index 00000000..1a542b20 --- /dev/null +++ b/solutions/cncf-generated/istio/istio-55283-implement-gateway-waypoint-customization.json @@ -0,0 +1,54 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:48.289Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "istio: Implement Gateway/Waypoint customization", + "description": "Implements https://docs.google.com/document/d/13ZoxgR0CIaOuwyhA4xxI7L1XwsZaDVOmq13K8922zcU/edit?tab=t.0\n\nFixes https://github.com/istio/istio/issues/53964\nSolves the use case for https://github.com/istio/istio/issues/53473 (but doesn't directly implement what they asked for)\nFixes https://github.com/istio/istio/issues/46594\nMaybe resolves 
https://github.com/istio/istio/issues/54453 (not sure if you can _remove_ a port)\nFixes https://github.com/istio/istio/issues/53189\nFixes https://github.com/istio/istio/issues/55585\n\nThis PR implements customization of the Kubernetes Gateway controller. This means a `istio` or `istio-waypoint` Gateway object output can be customized.\n\nThere is a hierarchy of: `builtin defaults` < `GatewayClass` settings (new) < `Gateway` settings (new).\n\nUsers configure these via `ConfigMap` resources. The keys in the configmap represent each resource output.\n\nFor Gateway, they are referenced by `spec.infrastructure.parametersRef`.\nFor GatewayClass, we have a naming s", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Skipping CI for Draft Pull Request.\nIf you want CI signal for your change, please convert it to an actual PR.\nYou can still manually trigger a test run with `/test all`", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "apiVersion: gateway.networking.k8s.io/v1beta1\r\nkind: Gateway\r\nmetadata:\r\n name: gateway\r\nspec:\r\n infrastructure:\r\n parametersRef:\r\n group: \"\"\r\n kind: ConfigMap\r\n name: gw-options\r\n gatewayClassName: istio\r\n listeners:\r\n - name: default\r\n port: 80\r\n protocol: HTTP\r\n---\r\napiVersion: gateway.networking.k8s.io/v1beta1\r\nkind: HTTPRoute\r\nmetadata:\r\n name: echo\r\nspec:\r\n parentRefs:\r\n - name: gateway\r\n rules:\r\n - backendRefs:\r\n - name: echo\r\n port: 80\r\n---\r\napiVersion: v1\r\nkind: ConfigMap\r\nmetadata:\r\n name: gw-options\r\ndata:\r\n horizontalPodAutoscaler: |\r\n spec:\r\n minReplicas: 2\r\n maxReplicas: 2\r\n\r\n deployment: |\r\n metadata:\r\n annotations:\r\n john-gw: hi\r\n spec:\r\n replicas: 4\r\n template:\r\n spec:\r\n containers:\r\n - name: istio-proxy\r\n resources:\r\n requests:\r\n cpu: 222m\r\n---\r\napiVersion: v1\r\nkind: ConfigMap\r\nmetadata:\r\n name: 
istio-default-gatewayclass-istio\r\n namespace: istio-system\r\ndata:\r\n deployment: |-\r\n metadata:\r\n annotations:\r\n john-gwc: hix", + "apiVersion: install.istio.io/v1alpha1\r\nkind: IstioOperator\r\nspec:\r\n values:\r\n gatewayClasses:\r\n istio:\r\n deployment:\r\n metadata:\r\n annotations:\r\n john: hi" + ] + } + }, + "metadata": { + "tags": [ + "istio", + "graduated", + "networking", + "size-xl" + ], + "category": "networking", + "cncfProjects": [ + "istio" + ], + "targetResourceKinds": [ + "Pod", + "Deployment", + "Ingress", + "Configmap", + "Namespace", + "Horizontalpodautoscaler" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/istio/istio/pull/55283", + "sourceRepo": "istio/istio", + "reactions": 9, + "comments": 16 + }, + "security": { + "scannedAt": "2026-02-27T17:47:48.289Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/istio/istio-57679-remove-use-of-comment-module-when-testing-kernel-support-for-iptable.json b/solutions/cncf-generated/istio/istio-57679-remove-use-of-comment-module-when-testing-kernel-support-for-iptable.json new file mode 100644 index 00000000..a297175d --- /dev/null +++ b/solutions/cncf-generated/istio/istio-57679-remove-use-of-comment-module-when-testing-kernel-support-for-iptable.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:56.792Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "istio: Remove use of comment module when testing kernel support for iptables version", + "description": "**Please provide a description of this PR:**\nFixes https://github.com/istio/istio/issues/57678\n\nUse of comment module when verifying there is kernal support for the current iptables version is unnecessary, and its use prevents use of istio in gVisor.\n\nThis change removes its use, given it is unnecessary for the test.", + "type": 
"troubleshoot", + "status": "completed", + "resolution": { + "summary": "**Please provide a description of this PR:**", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "istio", + "graduated", + "networking", + "area-networking", + "ok-to-test", + "size-xs", + "release-notes-none" + ], + "category": "networking", + "cncfProjects": [ + "istio" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/istio/istio/pull/57679", + "sourceRepo": "istio/istio", + "reactions": 5, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:47:56.792Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/istio/istio-57971-support-envoyfilter-in-waypoint.json b/solutions/cncf-generated/istio/istio-57971-support-envoyfilter-in-waypoint.json new file mode 100644 index 00000000..30b12d76 --- /dev/null +++ b/solutions/cncf-generated/istio/istio-57971-support-envoyfilter-in-waypoint.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:47:49.272Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "istio: Support EnvoyFilter in Waypoint", + "description": "**Please provide a description of this PR:**\n\nAPI: https://github.com/istio/api/pull/3577\n\nFixes: https://github.com/istio/istio/issues/43720", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Skipping CI for Draft Pull Request.\nIf you want CI signal for your change, please convert it to an actual PR.\nYou can still manually trigger a test run with `/test all`", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "istio", + "graduated", + "networking", + "size-xxl" + ], + "category": 
"networking", + "cncfProjects": [ + "istio" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/istio/istio/pull/57971", + "sourceRepo": "istio/istio", + "reactions": 8, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:47:49.272Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/jaeger/jaeger-1050-wip-add-native-plugins-support-to-the-collector.json b/solutions/cncf-generated/jaeger/jaeger-1050-wip-add-native-plugins-support-to-the-collector.json new file mode 100644 index 00000000..826acfb8 --- /dev/null +++ b/solutions/cncf-generated/jaeger/jaeger-1050-wip-add-native-plugins-support-to-the-collector.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:26.025Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "jaeger: WIP: Add native plugins support to the Collector", + "description": "## Which problem is this PR solving?\nThis PR provides the foundation to support plugins in Jaeger using native Go plugins (`pkg/plugin`). Resolves #422 \n\nSupported plugins:\n- Storage (`spanstore` + `dependencystore`)\n- Span Processor (`Sanitizer`, `PreProcessor`, `PreSave`, `SpanFilter`)\n\n## Short description of the changes\nThe current PR is working and this comment https://github.com/jaegertracing/jaeger/pull/1050#issuecomment-456126756 includes an example of a storage plugin that seems to work so far.\n\n## What is missing\n- Configuration file to order the plugins\n- Unit/Integration tests\n- More examples\n- Documentation\n- Support for that in the Dockerfile\n\n## How to run it\n1. 
Compile the plugins\n```\ncd examples/collector/plugins/\ngo build -buildmode=plugin sanitizer_logger.go\ngo build -buildmode=plugin pre_processor_logger.go\ngo build -buildmode=plugin pre_save_logger.go\ngo build -buildmode=plugin span_filter_logger.go\n```\n2. Run the collector with the proper flag\n```\nSPAN_STORAGE", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Great! Looks very promising. \n\nSide note: I didn't even know we had presave postsave... Whatever for?\n\nA few comments on the design:\n- I think this should be generic capability, not collector specific. That means, besides going into pkg/plugins module, a global cli flag for plugins dir, similar to metrics/logging. Btw, you mentioned env var: that should come auto from viper, as long as you have a flag. However, ultimately we will need this to behave similar to span-storage-type env var, because I can envision some plugins needing access to cli flags.\n- perhaps plugin factory can be something like `Get(name string, type reflect.Type) ([]interface{}, error)`, ie factory initially loads all plugin libs and allows to search for symbols of specific name and type, returning all found. This way we don't hardcode specific interfaces inside the factory. The caller is responsible for chaining or whatever is appropriate. The callers may also deal with things that implement Initializable/[Configurable](https://github.com/jaegertracing/jaeger/blob/96ce340848f17699ced8f71fcd8129689ea0b7ed/plugin/configurable.go#L25), already introduced in `storage.Factory`.\n- one important consideration for things like filters/sanitizers is the order. Not sure how to enforce it; one option is instead of walking a directory we take a file that lists plugin files in user-defined order and we respect it. 
That seems cleaner and more explicit than the other approaches I can think of, yet does not address the ca", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "cd examples/collector/plugins/\r\ngo build -buildmode=plugin sanitizer_logger.go\r\ngo build -buildmode=plugin pre_processor_logger.go\r\ngo build -buildmode=plugin pre_save_logger.go\r\ngo build -buildmode=plugin span_filter_logger.go", + "SPAN_STORAGE_TYPE=memory LOG_LEVEL=debug PLUGINS_DIRECTORY=`pwd`/examples/collector/plugins/ go run cmd/collector/main.go", + "PreProcessSpans... 4 spans\r\nSpanFilter... TraceID=66333d0d4a3452c SpanID=ef9dbde43426f82f OperationName=truncated\r\nSpanFilter... TraceID=66333d0d4a3452c SpanID=ef9dbde43426f82f OperationName=truncated\r\nSpanFilter... TraceID=66333d0d4a3452c SpanID=7c8453653cab6cbf OperationName=truncated\r\nSpanFilter... TraceID=66333d0d4a3452c SpanID=7c8453653cab6cbf OperationName=truncated\r\nSpanFilter... TraceID=66333d0d4a3452c SpanID=8de1edfd89f8e32e OperationName=truncated\r\nSpanFilter... TraceID=66333d0d4a3452c SpanID=8de1edfd89f8e32e OperationName=truncated\r\nSpanFilter... TraceID=66333d0d4a3452c SpanID=66333d0d4a3452c OperationName=truncated\r\nSpanFilter... TraceID=66333d0d4a3452c SpanID=66333d0d4a3452c OperationName=truncated\r\nSanitizer... TraceID=66333d0d4a3452c SpanID=ef9dbde43426f82f OperationName=truncated\r\nSanitizer... TraceID=66333d0d4a3452c SpanID=7c8453653cab6cbf OperationName=truncated\r\nSanitizer... TraceID=66333d0d4a3452c SpanID=66333d0d4a3452c OperationName=truncated\r\nSanitizer... TraceID=66333d0d4a3452c SpanID=8de1edfd89f8e32e OperationName=truncated\r\nPreSave... TraceID=66333d0d4a3452c SpanID=66333d0d4a3452c OperationName=truncated\r\nPreSave... TraceID=66333d0d4a3452c SpanID=8de1edfd89f8e32e OperationName=truncated\r\nPreSave... TraceID=66333d0d4a3452c SpanID=ef9dbde43426f82f OperationName=truncated\r\nPreSave... 
TraceID=66333d0d4a3452c SpanID=7c8453653cab6cbf OperationName=truncated" + ] + } + }, + "metadata": { + "tags": [ + "jaeger", + "graduated", + "observability" + ], + "category": "observability", + "cncfProjects": [ + "jaeger" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/jaegertracing/jaeger/pull/1050", + "sourceRepo": "jaegertracing/jaeger", + "reactions": 1, + "comments": 17 + }, + "security": { + "scannedAt": "2026-02-27T17:43:26.025Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/jaeger/jaeger-1197-support-archive-traces-for-es-storage.json b/solutions/cncf-generated/jaeger/jaeger-1197-support-archive-traces-for-es-storage.json new file mode 100644 index 00000000..a80d349a --- /dev/null +++ b/solutions/cncf-generated/jaeger/jaeger-1197-support-archive-traces-for-es-storage.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:27.051Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "jaeger: Support archive traces for ES storage", + "description": "Resolves https://github.com/jaegertracing/jaeger/issues/818\nRelated to https://github.com/jaegertracing/jaeger/issues/1242\n\nThis adds the implementation of archive storage for ES. There are two possible configurations how to use this feature:\n\n* the default - it will store and read archived traces from `jaeger-archive-span` index.\n* using rollover API - it writers spans to an alias `jaeger-span-archive-write` and reads from an alias `jaeger-span-archive-read`. This deployment requires an external component that performs index managent: creating index, creating alias, callig rollover API and managing `max-span-age` by removing indices from read alias. 
This component is `esRollover.py` available as `jaeger-es-rollover` docker image.\n\nAs the default index name does not collide with aliases used by rollover users can seameasly migrate from default deployment to rollover by putting `jaeger-archive-span` index into the read alias.\n\n### Default use case\nIt does not require any configuration ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This is ready for review @jaegertracing/elasticsearch @yurishkuro @objectiser \n\nPlease read the first comment. We could also automatically default to write and read aliases and do not support no ttl use case.\n\nThe question is also who should create aliases. We could embed it in Jaeger, although I am not sure about that approach.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "--es-archive.use-aliases Use read and write aliases for indices. Use this option with Elasticsearch rollover API. It requires an external component to create aliases before startup and then performing its management. Note that es-archive.max-span-age is not taken into the account and has to be substituted by external component managing read alias. 
(default false)", + "ARCHIVE=true UNIT=seconds python3 esRollover.py init localhost:9200", + "ARCHIVE=true CONDITIONS='{\"max_age\": \"1s\"}' python3 esRollover.py rollover localhost:9200" + ] + } + }, + "metadata": { + "tags": [ + "jaeger", + "graduated", + "observability", + "storage-elasticsearch" + ], + "category": "observability", + "cncfProjects": [ + "jaeger" + ], + "targetResourceKinds": [ + "Deployment" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/jaegertracing/jaeger/pull/1197", + "sourceRepo": "jaegertracing/jaeger", + "reactions": 1, + "comments": 22 + }, + "security": { + "scannedAt": "2026-02-27T17:43:27.051Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/jaeger/jaeger-1690-add-elasticsearch-7-support.json b/solutions/cncf-generated/jaeger/jaeger-1690-add-elasticsearch-7-support.json new file mode 100644 index 00000000..a1512baa --- /dev/null +++ b/solutions/cncf-generated/jaeger/jaeger-1690-add-elasticsearch-7-support.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:19.879Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "jaeger: Add Elasticsearch 7 support", + "description": "Adds support for Elasticsearch 7.x\n\nResolves #1474\nDepends on https://github.com/olivere/elastic/pull/1146\n\n- Updates github.com/olivere/elastic to 6.2.21\n- Removes the deprecated _default_ field from mappings\n- Replaces document types with '_doc' as a transition step to removing them entirely\n- Sets include_type_name for compatibility between elasticsearch 6.x and 7.x\n\nrest_total_hits_as_int support is also required for compatibility between elasticsearch 6.x and 7.x (waiting for feature to be merged in github.com/olivere/elastic) https://github.com/olivere/elastic/pull/1146", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Resolves 
#74 \nBlocked by https://github.com/jaegertracing/jaeger/pull/1690", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "| [Impacted Files](https://codecov.io/gh/jaegertracing/jaeger/pull/1690?src=pr&el=tree) | Coverage Δ | |\n|---|---|---|\n| [plugin/storage/es/spanstore/reader.go](https://codecov.io/gh/jaegertracing/jaeger/pull/1690/diff?src=pr&el=tree#diff-cGx1Z2luL3N0b3JhZ2UvZXMvc3BhbnN0b3JlL3JlYWRlci5nbw==) | `100% <ø> (ø)` | |\n| [plugin/storage/es/spanstore/service\\_operation.go](https://codecov.io/gh/jaegertracing/jaeger/pull/1690/diff?src=pr&el=tree#diff-cGx1Z2luL3N0b3JhZ2UvZXMvc3BhbnN0b3JlL3NlcnZpY2Vfb3BlcmF0aW9uLmdv) | `100% <ø> (ø)` | |\n| [plugin/storage/es/factory.go](https://codecov.io/gh/jaegertracing/jaeger/pull/1690/diff?src=pr&el=tree#diff-cGx1Z2luL3N0b3JhZ2UvZXMvZmFjdG9yeS5nbw==) | `100% <100%> (ø)` | |\n| [plugin/storage/es/dependencystore/schema.go](https://codecov.io/gh/jaegertracing/jaeger/pull/1690/diff?src=pr&el=tree#diff-cGx1Z2luL3N0b3JhZ2UvZXMvZGVwZW5kZW5jeXN0b3JlL3NjaGVtYS5nbw==) | `100% <100%> (ø)` | |\n| [plugin/storage/es/dependencystore/storage.go](https://codecov.io/gh/jaegertracing/jaeger/pull/1690/diff?src=pr&el=tree#diff-cGx1Z2luL3N0b3JhZ2UvZXMvZGVwZW5kZW5jeXN0b3JlL3N0b3JhZ2UuZ28=) | `85.71% <100%> (ø)` | |\n\n------\n\n[Continue to review full report at Codecov](https://codecov.io/gh/jaegertracing/jaeger/pull/1690?src=pr&el=continue).\n> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)\n> `Δ = absolute (impact)`, `ø = not affected`, `? = missing data`\n> Powered by [Codecov](https://codecov.io/gh/jaegertracing/jaeger/pull/1690?src=pr&el=footer). Last update [ecdecd1...22a2332](https://codecov.io/gh/jaegertracing/jaeger/pull/1690?src=pr&el=lastupdated). 
Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).\n\n## Elasticsearch 7 documentation\r\n\r\n* https://www.elastic.co/guide/en/elasticsearch/reference/7.x/release-notes-7.0.0.html\r\n* https://www.elastic.co/guide/en/elasticsearch/reference/7.x/breaking-changes-7.0.html\r\n* https://www.elastic.co/guide/en/elasticsearch/reference/current/removal-of-types.html\r\n\r\n## I have tested\r\n\r\n- [x] self-provisioned ES in Jaeger operator\r\n- [x] spark-dependencies - the project will need a fix due to `Caused by: org.elasticsearch.hadoop.EsHadoopIllegalArgumentException: Unsupported/Unknown Elasticsearch version 7.3.0`. https://github.com/jaegertracing/spark-dependencies/issues/74\r\n- [x] Kibana integration with ES7 https://medium.com/jaegertracing/jaeger-elasticsearch-and-kibana-7ecb846137b6 \r\n- [x] Test rollover with ES7\r\n- [x] Test rollover with ES5\r\n\r\n## Data migration from ES6 to ES7\r\n\r\n* https://www.elastic.co/guide/en/elasticsearch/reference/7.2/setup-upgrade.html\r\n* https://www.elastic.co/guide/en/elasticsearch/reference/7.2/reindex-upgrade.html\r\n* https://www.elastic.co/guide/en/elasticsearch/reference/7.2/reindex-upgrade-inplace.html\r\n\r\n> Elasticsearch can read indices created in the previous major version. If you have indices created in 5.x or before, you must reindex or delete them before upgrading to 7.2.1. Elasticsearch nodes will fail to start if incompatible indices are present. Snapshots of 5.x or earlier indices cannot be restored to a 7.x cluster even if they were created by a 6.x cluster.\r\n\r\nI was able to migrate data from ES 5.x up to ES 7.x. I did the following\r\n1. run ES 5.x and store data\r\n2. run ES 6.8 (this does not require any migration)\r\n3. change index template to be ES 7.x compatible, before running the specify the number of shards (number of nodes) and replicas (0).", + "4. reindex all span and service indices to a new index with the corrent mapping. 
The new indices will have suffix `-1`.", + "5. delete old indices, exlude `-1` indices from deletion" + ] + } + }, + "metadata": { + "tags": [ + "jaeger", + "graduated", + "observability", + "storage-elasticsearch" + ], + "category": "observability", + "cncfProjects": [ + "jaeger" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/jaegertracing/jaeger/pull/1690", + "sourceRepo": "jaegertracing/jaeger", + "reactions": 10, + "comments": 8 + }, + "security": { + "scannedAt": "2026-02-27T17:43:19.879Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/jaeger/jaeger-2454-add-support-for-elasticsearch-ilm-polices.json b/solutions/cncf-generated/jaeger/jaeger-2454-add-support-for-elasticsearch-ilm-polices.json new file mode 100644 index 00000000..e7bd4f43 --- /dev/null +++ b/solutions/cncf-generated/jaeger/jaeger-2454-add-support-for-elasticsearch-ilm-polices.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:23.740Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "jaeger: Add support for Elasticsearch ILM Polices", + "description": "Signed-off-by: santosh \n\n## Which problem is this PR solving?\n- Resolves #2048\n## Short description of the changes\n- Adds support for ILM policies by creating overriding index templates - which assign the ILM policy and rollover alias and read-alias to the index upon creation. 
Change is made esRollover script to enable ILM using an environment variable USE_ILM to be backwards compatible.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@albertteoh I think you folks run Jaeger with ES, would you like to take a look?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "| [Impacted Files](https://codecov.io/gh/jaegertracing/jaeger/pull/2454?src=pr&el=tree) | Coverage Δ | |\n|---|---|---|\n| [cmd/templatizer/app/renderer/render.go](https://codecov.io/gh/jaegertracing/jaeger/pull/2454/diff?src=pr&el=tree#diff-Y21kL3RlbXBsYXRpemVyL2FwcC9yZW5kZXJlci9yZW5kZXIuZ28=) | `87.50% <87.50%> (ø)` | |\n| [plugin/storage/es/factory.go](https://codecov.io/gh/jaegertracing/jaeger/pull/2454/diff?src=pr&el=tree#diff-cGx1Z2luL3N0b3JhZ2UvZXMvZmFjdG9yeS5nbw==) | `98.16% <94.44%> (-1.84%)` | :arrow_down: |\n| [cmd/templatizer/app/flags.go](https://codecov.io/gh/jaegertracing/jaeger/pull/2454/diff?src=pr&el=tree#diff-Y21kL3RlbXBsYXRpemVyL2FwcC9mbGFncy5nbw==) | `100.00% <100.00%> (ø)` | |\n| [pkg/es/textTemplate.go](https://codecov.io/gh/jaegertracing/jaeger/pull/2454/diff?src=pr&el=tree#diff-cGtnL2VzL3RleHRUZW1wbGF0ZS5nbw==) | `100.00% <100.00%> (ø)` | |\n| [plugin/storage/es/options.go](https://codecov.io/gh/jaegertracing/jaeger/pull/2454/diff?src=pr&el=tree#diff-cGx1Z2luL3N0b3JhZ2UvZXMvb3B0aW9ucy5nbw==) | `100.00% <100.00%> (ø)` | |\n| [plugin/storage/es/spanstore/writer.go](https://codecov.io/gh/jaegertracing/jaeger/pull/2454/diff?src=pr&el=tree#diff-cGx1Z2luL3N0b3JhZ2UvZXMvc3BhbnN0b3JlL3dyaXRlci5nbw==) | `100.00% <100.00%> (ø)` | |\n| [cmd/query/app/server.go](https://codecov.io/gh/jaegertracing/jaeger/pull/2454/diff?src=pr&el=tree#diff-Y21kL3F1ZXJ5L2FwcC9zZXJ2ZXIuZ28=) | `95.68% <0.00%> (-1.44%)` | :arrow_down: |\n\n------\n\n[Continue to review full report at 
Codecov](https://codecov.io/gh/jaegertracing/jaeger/pull/2454?src=pr&el=continue).\n> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)\n> `Δ = absolute (impact)`, `ø = not affected`, `? = missing data`\n> Powered by [Codecov](https://codecov.io/gh/jaegertracing/jaeger/pull/2454?src=pr&el=footer). Last update [2ff0a3d...a3b465b](https://codecov.io/gh/jaegertracing/jaeger/pull/2454?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).\n\n@albertteoh Forgot to mention one more usecase - \r\n\r\nTwo Jaeger Installations ( mostly belonging to two environments - e.g. uat dev ) using same elasticsearch backend.\r\nFor rollover/ILM to work correctly would need two different index templates for both of them ( because both should use different index prefix for aliases and indices). In esRollover - I made changes to add index prefix to index template id (name).", + "Now that we decided not to have multiple templates and not to use order - we should do the same thing while creating index templates in `writer.go` -\r\n\r\nHere - \r\nhttps://github.com/jaegertracing/jaeger/blob/1b209472dc6a35f7de90c7a9048cb4e960cc2335/plugin/storage/es/spanstore/writer.go#L92\r\nand here - \r\nhttps://github.com/jaegertracing/jaeger/blob/1b209472dc6a35f7de90c7a9048cb4e960cc2335/plugin/storage/es/spanstore/writer.go#L96\r\n\r\nI propose we change it to -", + "> @albertteoh Forgot to mention one more usecase -\r\n> \r\n> Two Jaeger Installations ( mostly belonging to two environments - e.g. uat dev ) using same elasticsearch backend.\r\n> For rollover/ILM to work correctly would need two different index templates for both of them ( because both should use different index prefix for aliases and indices). 
In esRollover - I made changes to add index prefix to index template id (name).\r\n> \r\n>" + ] + } + }, + "metadata": { + "tags": [ + "jaeger", + "graduated", + "observability" + ], + "category": "observability", + "cncfProjects": [ + "jaeger" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/jaegertracing/jaeger/pull/2454", + "sourceRepo": "jaegertracing/jaeger", + "reactions": 2, + "comments": 21 + }, + "security": { + "scannedAt": "2026-02-27T17:43:23.740Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/jaeger/jaeger-2948-add-s390x-support-on-multiarch-docker-images.json b/solutions/cncf-generated/jaeger/jaeger-2948-add-s390x-support-on-multiarch-docker-images.json new file mode 100644 index 00000000..4c842846 --- /dev/null +++ b/solutions/cncf-generated/jaeger/jaeger-2948-add-s390x-support-on-multiarch-docker-images.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:24.780Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "jaeger: Add s390x support on multiarch docker images", + "description": "This change is to add s390x support on multiarch Jaeger docker images\n\nSigned-off-by: Kun-Lu \n\n## Which problem is this PR solving?\n- Fixes #2292 \n\n## Short description of the changes\n- modifies CI workflows (`ci-all-in-one-build.yml`, `ci-docker-build.yml`, `ci-release.yml`) to build and publish multi-arch docker images on `docker.io` and `quay.io`.\n- adds `s390x` support on multi-arch docker images of `baseimg`, `all-in-one` and all the Jaeger components, except debug images and `Jaeger-Cassandra-Schema` whose base image doesn’t have `s390x` support yet.\n- uses local registry service to hold: \n 1. multi-arch `baseimg`,\n 2. 
multi-arch `all-in-one` image for local integration test.\n- updates `Makefile` and `scripts/build-all-in-one-image.sh`, adds `scripts/build-upload-docker-images.sh` to use `docker buildx` to build and publish multi-arch docker images.\n- removes `scripts/upload-all-docker-images.sh` since its uploading function has been implemented in `scripts/", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@kun-lu20 could you please describe what kind of testing you did to make sure this all works? Ideally you would run this in your fork with a tag and override the target repos in the registries to your private ones, so that the end result are the images pushed to registries that can be validated.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "jaeger", + "graduated", + "observability" + ], + "category": "observability", + "cncfProjects": [ + "jaeger" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/jaegertracing/jaeger/pull/2948", + "sourceRepo": "jaegertracing/jaeger", + "reactions": 1, + "comments": 24 + }, + "security": { + "scannedAt": "2026-02-27T17:43:24.780Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/jaeger/jaeger-2966-yaaaas-yet-another-attempt-at-adaptive-sampling.json b/solutions/cncf-generated/jaeger/jaeger-2966-yaaaas-yet-another-attempt-at-adaptive-sampling.json new file mode 100644 index 00000000..d4418c4a --- /dev/null +++ b/solutions/cncf-generated/jaeger/jaeger-2966-yaaaas-yet-another-attempt-at-adaptive-sampling.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:21.021Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "jaeger: YAAAAS - Yet Another Attempt At Adaptive Sampling", + 
"description": "## Which problem is this PR solving?\n- Fixes #365 \n\n## Short description of the changes\nThis PR builds on #2818 and as such @Ashmita152 is named as a coauthor. Currently it only wires config for adaptive sampling to the existing Cassandra storage implementations. \n\n## Additional changes\n- Adjusts hotrod jaeger tracer creation to allow configuration through env vars \n- Makes changes to ./docker-compose/jaeger-docker-compose.yaml to test adaptive sampling with hotrod.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Let me move this a little farther forward before a first review. I have everything wired up but I'd like to see it work at least a little before we start reviews.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Are we saving throughput data as some sort of CSV format? Maybe worth converting to JSON before we commit, because JSON can be further extended in the future, but CSV would make any extension difficult.\n> Are we saving throughput data as some sort of CSV format? Maybe worth converting to JSON before we commit, because JSON can be further extended in the future, but CSV would make any extension difficult.\r\n\r\nYes, that code is here:\r\nhttps://github.com/jaegertracing/jaeger/blame/master/plugin/storage/cassandra/samplingstore/storage.go#L138\r\n\r\nThere are two sets of methods to translate between strings and probabilities/throughput. I don't know enough about the original implementation to comment on why csv was chosen. 
json would be heavier weight to parse/store if there were a large number of operations to store, but I can look into it.\r\n\r\n**Edit/Update:**\r\n[ServiceOperationProbabilities](https://github.com/jaegertracing/jaeger/blob/master/cmd/collector/app/sampling/model/sampling.go#L28) efficiently marshals to json only increasing the total size of data by a few bytes.\r\n\r\nHowever, [Throughput](https://github.com/jaegertracing/jaeger/blob/master/cmd/collector/app/sampling/model/sampling.go#L19) sees a significant increase in size required to store due to the repeated field names. In the naive case (not setting field names) this is a 4-5x increase in string length.\r\n\r\n\r\n\nLinting is failing with:\r\n\r\n`[/home/runner/work/jaeger/jaeger/pkg/hostname/hostname.go:18] - G404 (CWE-338): Use of weak random number generator (math/rand instead of crypto/rand) (Confidence: MEDIUM, Severity: HIGH)`\n@joe-elliott, I see in a test here a reference to the flag `--sampling.initial-sampling-probability`, but I can't seem to be able to use it with this PR:", + "Would you mind writing a couple of lines describing how I can test this?\n> Would you mind writing a couple of lines describing how I can test this?\r\n\r\nIn the case of the above lines you simply need to define the sampling type (default is static):", + "Also please review `/docker-compose/jaeger-docker-compose.yml`. 
I made some small changes here to run a full jaeger stack with hotrod and cassandra setup to use adaptive sampling.\nJust tried this out, and looks good, but I got a question about this:" + ] + } + }, + "metadata": { + "tags": [ + "jaeger", + "graduated", + "observability" + ], + "category": "observability", + "cncfProjects": [ + "jaeger" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/jaegertracing/jaeger/pull/2966", + "sourceRepo": "jaegertracing/jaeger", + "reactions": 7, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:43:21.021Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/jaeger/jaeger-4829-support-elasticsearch-8-x.json b/solutions/cncf-generated/jaeger/jaeger-4829-support-elasticsearch-8-x.json new file mode 100644 index 00000000..31890624 --- /dev/null +++ b/solutions/cncf-generated/jaeger/jaeger-4829-support-elasticsearch-8-x.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:22.659Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "jaeger: Support Elasticsearch 8.x", + "description": "## Which problem is this PR solving?\n- Resolves #3571 \n\n## Description of the changes\n- Added index templates for esv8\n- Added a esv8 client to handle the put index template request that is not currently possible by olivere/elastic\n- After these changes are merged , users no more need to use create.index-templates= false unless they want to add their custom index templates and force the es.version = 7", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## Which problem is this PR solving?\n- Follow-up to #4829 where the integration test is using non-default template priorities\n\n## Description of the changes\n- Remove priorities overrides, it must work with defaults because this is how most 
users would run.\n\n## How was this change tested?\n- CI should succeed", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Unknown setting [xpack.monitoring.enabled] did you mean any of [xpack.profiling.enabled, xpack.monitoring.templates.enabled]?", + "if (( major_version < 8 )); then\r\n params+=(--env \"xpack.monitoring.enabled=false\")\r\nelse\r\n params+=(--env \"xpack.monitoring.collection.enabled=false\")\r\nfi" + ] + } + }, + "metadata": { + "tags": [ + "jaeger", + "graduated", + "observability", + "changelog-new-feature" + ], + "category": "observability", + "cncfProjects": [ + "jaeger" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/jaegertracing/jaeger/pull/4829", + "sourceRepo": "jaegertracing/jaeger", + "reactions": 7, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:43:22.659Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/k0s/k0s-1038-add-spec-api-bindaddress-configuration.json b/solutions/cncf-generated/k0s/k0s-1038-add-spec-api-bindaddress-configuration.json new file mode 100644 index 00000000..eed16800 --- /dev/null +++ b/solutions/cncf-generated/k0s/k0s-1038-add-spec-api-bindaddress-configuration.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:57.424Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "k0s: Add spec.api.bindAddress configuration", + "description": "Should a new `bindAddress` option be created and change documentation for the existing `address` option, or should the existing `address` option's behavior be changed to match the existing documentation?\n\nThe later I can see causing some issues with existing setups, `bindAddress` is probably the safest route. 
Currently the default for `bindAddress` is \"0.0.0.0\", which is the default kube-api-server uses if the `--bind-address` flag isn't used.\n\n**Issue**\nFixes #957\n\n**What this PR Includes**\nAdds the ability to define a `bindAddress` for the kube-api-server.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "I'd prefer to use separate `bindAddress` option as more safe route for this", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "k0s", + "sandbox", + "orchestration", + "merge-conflict" + ], + "category": "troubleshooting", + "cncfProjects": [ + "k0s" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/k0sproject/k0s/pull/1038", + "sourceRepo": "k0sproject/k0s", + "reactions": 2, + "comments": 30 + }, + "security": { + "scannedAt": "2026-02-27T17:48:57.424Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/k0s/k0s-1165-add-support-for-external-etcd-cluster.json b/solutions/cncf-generated/k0s/k0s-1165-add-support-for-external-etcd-cluster.json new file mode 100644 index 00000000..2e72efeb --- /dev/null +++ b/solutions/cncf-generated/k0s/k0s-1165-add-support-for-external-etcd-cluster.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:59.355Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "k0s: Add support for external etcd cluster", + "description": "Signed-off-by: Jacek Ewertowski \n\n**Issue**\nFixes #1010\n\n**What this PR Includes**\nThis PR aims to enable users to use external etcd cluster as a storage for k0s. 
To do that a user has to define the following configuration:\n```\n storage:\n etcd:\n externalCluster:\n endpoints:\n - http://192.168.10.1:2379\n - http://192.168.10.2:2379\n - http://192.168.10.3:2379\n etcdPrefix: k0s-tenant\n type: etcd\n```\nField `endpoints` contains list of URLs that listen on for client requests. `etcdPrefix` is used to enable multi-tenancy in etcd and specifies the name of the root path which by default is `/registry`; this value will be used to pass as `--etcd-prefix` argument in kube-apiserver.\n\n**Changes**\n- Extend `ClusterConfig` CRD with `etcd.externalCluster` option.\n\n**TODO**:\n- [x] Skip running internal etcd if `etcd.externalCluster` is defined.\n- [x] Configure external endpoints in `EtcdClient`.\n- [x] Run kube-apiserver with co", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "> I think that internal etcd should be managed only by k0s to keep it as simple as possible.\n\nI agree with this, there's no need to complicate things for the k0s managed etcd case.\n\n> I think prefix and clientCert can be there for the internal etcd too.\n\nprefix maybe. 
client cert really by itself not, as k0s managed etcd will need lot more certs than only the client cert", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "storage:\r\n etcd:\r\n externalCluster:\r\n endpoints:\r\n - http://192.168.10.1:2379\r\n - http://192.168.10.2:2379\r\n - http://192.168.10.3:2379\r\n etcdPrefix: k0s-tenant\r\n type: etcd", + "storage:\r\n type: etcd\r\n etcd:\r\n externalCluster: null\r\n peerAddress: 192.168.68.104", + "storage:\r\n type: etcd\r\n etcd:\r\n externalCluster:\r\n endpoints:\r\n - http://192.168.68.104:2379\r\n - http://192.168.68.105:2379\r\n etcdPrefix: k0s-tenant\r\n caFile: /etc/pki/CA/ca.crt\r\n clientCertFile: /etc/pki/tls/certs/etcd-client.crt\r\n clientKeyFile: /etc/pki/tls/private/etcd-client.key\r\n peerAddress: 192.168.68.104" + ] + } + }, + "metadata": { + "tags": [ + "k0s", + "sandbox", + "orchestration" + ], + "category": "troubleshooting", + "cncfProjects": [ + "k0s" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/k0sproject/k0s/pull/1165", + "sourceRepo": "k0sproject/k0s", + "reactions": 2, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:48:59.356Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/k0s/k0s-1880-add-config-option-to-set-bind-address-for-all-components.json b/solutions/cncf-generated/k0s/k0s-1880-add-config-option-to-set-bind-address-for-all-components.json new file mode 100644 index 00000000..262bf041 --- /dev/null +++ b/solutions/cncf-generated/k0s/k0s-1880-add-config-option-to-set-bind-address-for-all-components.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:56.036Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "k0s: Add config option to set bind address for all 
components.", + "description": "Fixes #1150 , the ability to use VIP+LB on controller nodes.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "`check-tunneledkas` smoke test failing as my custom branch for konnectivity bind-address does not have the tunneling built in", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "k0s", + "sandbox", + "orchestration", + "merge-conflict" + ], + "category": "troubleshooting", + "cncfProjects": [ + "k0s" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/k0sproject/k0s/pull/1880", + "sourceRepo": "k0sproject/k0s", + "reactions": 7, + "comments": 23 + }, + "security": { + "scannedAt": "2026-02-27T17:48:56.037Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/k0s/k0s-924-add-command-k0s-ctr-command-flags-args.json b/solutions/cncf-generated/k0s/k0s-924-add-command-k0s-ctr-command-flags-args.json new file mode 100644 index 00000000..a35993d2 --- /dev/null +++ b/solutions/cncf-generated/k0s/k0s-924-add-command-k0s-ctr-command-flags-args.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:58.330Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "k0s: Add command 'k0s ctr [command] [flags] [args]'", + "description": "**Issue**\nFixes #856 \n\n**What this PR Includes**\nThis PR adds command `ctr`, which is containerd CLI.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "If you want to verify if it works, execute the commands below to see that subcommands, flags and arguments work properly.\n```\nk0s ctr images list\nk0s ctr image pull docker.io/calico/node:v3.11.2\nk0s ctr images label docker.io/calico/node:v3.11.2 app=cni\nk0s ctr images 
label --replace-all docker.io/calico/node:v3.11.2 cni=calico\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "k0s ctr images list\r\nk0s ctr image pull docker.io/calico/node:v3.11.2\r\nk0s ctr images label docker.io/calico/node:v3.11.2 app=cni\r\nk0s ctr images label --replace-all docker.io/calico/node:v3.11.2 cni=calico", + "# ./k0s ctr -a /run/containerd/containerd.sock i list \r\nError: unknown shorthand flag: 'a' in -a", + "# ctr -a /run/containerd/containerd.sock i list\r\nREF TYPE DIGEST SIZE PLATFORMS LABELS" + ] + } + }, + "metadata": { + "tags": [ + "k0s", + "sandbox", + "orchestration" + ], + "category": "troubleshooting", + "cncfProjects": [ + "k0s" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/k0sproject/k0s/pull/924", + "sourceRepo": "k0sproject/k0s", + "reactions": 2, + "comments": 16 + }, + "security": { + "scannedAt": "2026-02-27T17:48:58.330Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/k3s/k3s-141-preload-a-docker-image-on-the-k3s-node-agents.json b/solutions/cncf-generated/k3s/k3s-141-preload-a-docker-image-on-the-k3s-node-agents.json new file mode 100644 index 00000000..f89d4d87 --- /dev/null +++ b/solutions/cncf-generated/k3s/k3s-141-preload-a-docker-image-on-the-k3s-node-agents.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:03.225Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "k3s: preload a docker image on the k3s node agents", + "description": "This PR adds preloading existing container images ( `docker save` format ) from `/var/lib/rancher/k3s/agent/images`\n\nFixes #92", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Confirmed that this version is working for my use case in #92", 
+ "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "$ sudo k3s crictl images\r\nIMAGE TAG IMAGE ID SIZE\r\ndocker.io/library/myimage latest 652014e0a66b3 82MB" + ] + } + }, + "metadata": { + "tags": [ + "k3s", + "sandbox", + "orchestration" + ], + "category": "troubleshooting", + "cncfProjects": [ + "k3s" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/k3s-io/k3s/pull/141", + "sourceRepo": "k3s-io/k3s", + "reactions": 2, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:49:03.225Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/k3s/k3s-3049-add-dual-stack-support-to-k3s.json b/solutions/cncf-generated/k3s/k3s-3049-add-dual-stack-support-to-k3s.json new file mode 100644 index 00000000..edb095d4 --- /dev/null +++ b/solutions/cncf-generated/k3s/k3s-3049-add-dual-stack-support-to-k3s.json @@ -0,0 +1,55 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:02.099Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "k3s: Add dual stack support to K3S", + "description": "Problem: Although Kubernetes does support dual stack, K3S does not\n\nSolution: Modify K3S so it can both support parameters for dual stack,\nand pass them to the different Kubernetes daemons\n\nSigned-off-by: José Luis Ledesma \n\n#### Proposed Changes ####\n\nAdd dual stack support to K3S\n\n#### Types of Changes ####\n\nNew Feature\n\n#### Verification ####\nYou'll need to start K3S with the `DualStack` feature flag. Also you'll need a different CNI than flannel, because it does not support dual stack (yet). I used Cilium and it worked fine. 
I start the k3s server like this:\n```\n/usr/local/bin/k3s server --disable traefik –-flannel-backend=none --disable-network-policy --disable servicelb --no-flannel --disable-kube-proxy \\\n\t\t--node-ip \\\n\t\t--node-ip \\\n\t\t--kube-apiserver-arg 'feature-gates=IPv6DualStack=true' \\\n\t\t--kube-controller-manager-arg 'feature-gates=IPv6DualStack=true' \\\n\t\t--kubelet-arg 'feature-gates=IPv6DualStack=true' \\\n\t\t--cluster-cidr '<", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Add dual stack support for flannel, it provided with three mode for flannel:\n1. Only ipv4 stack as the origin.\n2. Only ipv6 stack\n3. Dual stack \n\nAdd new option for flannel daemon to support dual stack:\n- \"publicIPv6\": \"IPv6 accessible by other nodes for\n inter-host communication\"\n- \"auto-detect-ipv4\": \"auto detect ipv4 address of the iface\",\n default value is true.\n- \"auto-detect-ipv6\": \"auto detect ipv6 address of the iface\",\n default value is false\n\nAdd new option into `net-conf.json` configuration, like following:\n```\n{\n \"EnableIPv4\": true,\n \"EnableIPv6\": true,\n \"Network\": \"172.16.0.0/16\",\n \"IPv6Network\": \"fc00::/48\",\n \"Backend\": {\n \"Type\": \"vxlan\"\n }\n}\n```\n`EnableIPv4` default value is true for useing kube subnet manager.\n`EnableIpv6` default value is false.\n\nFlannel dual stack feature has limitation, only work with vxlan backend\nand kube subnet manager now. To enable flannel dual stack feature, need\nto do the following step:\n1. setting flanneld daemon with `--kube-subnet-mgr --auto-detect-ipv6`\n2. settting `EnableIPv6` and `IPv6Network` in `net-conf.json`, like the\n above configuration.\n3. setting network interface that flannel used ipv6 address and\ndefault ipv6 gateway in the host node.\n4. 
vxlan support ipv6 tunnel require kernel version >= 3.12.\n\nIt also need flannel cni plugin to support dual stack ip allocation, so it \ndepends on: [https://github.com/containernetworking/plugins/pull/570](https://github.com/containernetworking/plugins/pull/570 )\n\n#248\n", + "steps": [ + "Only ipv4 stack as the origin.", + "Only ipv6 stack", + "Dual stack", + "setting flanneld daemon with `--kube-subnet-mgr --auto-detect-ipv6`", + "settting `EnableIPv6` and `IPv6Network` in `net-conf.json`, like the", + "setting network interface that flannel used ipv6 address and", + "vxlan support ipv6 tunnel require kernel version >= 3.12." + ], + "codeSnippets": [ + "/usr/local/bin/k3s server --disable traefik –-flannel-backend=none --disable-network-policy --disable servicelb --no-flannel --disable-kube-proxy \\\r\n\t\t--node-ip \\\r\n\t\t--node-ip \\\r\n\t\t--kube-apiserver-arg 'feature-gates=IPv6DualStack=true' \\\r\n\t\t--kube-controller-manager-arg 'feature-gates=IPv6DualStack=true' \\\r\n\t\t--kubelet-arg 'feature-gates=IPv6DualStack=true' \\\r\n\t\t--cluster-cidr ',' \\\r\n\t\t--service-cidr ',' \\", + "{\r\n \"EnableIPv4\": true,\r\n \"EnableIPv6\": true,\r\n \"Network\": \"172.16.0.0/16\",\r\n \"IPv6Network\": \"fc00::/48\",\r\n \"Backend\": {\r\n \"Type\": \"vxlan\"\r\n }\r\n}", + "enable-ipv6: \"true\"\r\n cluster-pool-ipv6-cidr: \"fd00:5000::/64\" \r\n cluster-pool-ipv6-mask-size: \"64\"" + ] + } + }, + "metadata": { + "tags": [ + "k3s", + "sandbox", + "orchestration" + ], + "category": "troubleshooting", + "cncfProjects": [ + "k3s" + ], + "targetResourceKinds": [ + "Service", + "Node" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/k3s-io/k3s/pull/3049", + "sourceRepo": "k3s-io/k3s", + "reactions": 3, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:49:02.099Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git 
a/solutions/cncf-generated/k3s/k3s-4725-setup-ip6-masquarading-rules-if-for-local-subnets.json b/solutions/cncf-generated/k3s/k3s-4725-setup-ip6-masquarading-rules-if-for-local-subnets.json new file mode 100644 index 00000000..c64963c5 --- /dev/null +++ b/solutions/cncf-generated/k3s/k3s-4725-setup-ip6-masquarading-rules-if-for-local-subnets.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:05.700Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "k3s: Setup ip6 masquarading rules if for local subnets", + "description": "When the IPv6 CIDR is in the unique local address range setup ipv6\nmasquarading, otherwise assume the ipv6 network is globally routed.\n\nSigned-off-by: Sjoerd Simons \n\n#### Proposed Changes ####\n\nCurrently when configure a dual-stack cluster it seems somewhat assumed that the cluster cidr is a public range otherwise the pods don't have ipv6 internet connectivity. Adjust it so that when the cluster cidr is not public the pods get masquarading for ipv6 as well similar to what happens for ipv4\n\n#### Types of Changes ####\n\nbugfix\n#### Verification ####\n\nSetup a dual-stacked cluster with a private ipv6 range (e.g. 
`fd00::/56`) and try to access public ipv6 service from a pod\n\n#### Linked Issues ####\n\nfixes #4683\n\n#### User-Facing Change ####\n\n```release-note\nDual-stacked clusters using a private ipv6 range will now automatically have ipv6 masquarding available\n```\n\n#### Further Comments ####\n\nThis requires flannel-io/flannel#1513", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "#### Proposed Changes ####\n\nSwitch to the native wireguard support now available in flannel; This also allows supporting dual-stacked clusters with wireguard\n\n#### Types of Changes ####\n\nnew feature\n\n#### Verification ####\n\nUses Setup a dual-stack k3s cluster with the flannel wireguard backend\n\n#### Linked Issues ####\n\n#4364 \n\n#### User-Facing Change ####\n```release-note\nk3s now uses the native Flannel wireguard backend. Not that this is incompatible with the previous extension based wireguard support so you cannot mix nodes with the old and new wireguard connection method \n```\n\n#### Further Comments ####\n\nKeeping it as a draft as it relies on an unreleased version of flannel as well as including some k3s patches that have been submitted seperately (#4724, #4725 ). 
However this reflects the state i've been testing with.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "#### Further Comments ####\r\n\r\nThis requires flannel-io/flannel#1513 \n#### Proposed Changes ####\r\n\r\nSwitch to the native wireguard support now available in flannel; This also allows supporting dual-stacked clusters with wireguard\r\n\r\n#### Types of Changes ####\r\n\r\nnew feature\r\n\r\n#### Verification ####\r\n\r\nUses Setup a dual-stack k3s cluster with the flannel wireguard backend\r\n\r\n#### Linked Issues ####\r\n\r\n#4364 \r\n\r\n#### User-Facing Change ####" + ] + } + }, + "metadata": { + "tags": [ + "k3s", + "sandbox", + "orchestration" + ], + "category": "troubleshooting", + "cncfProjects": [ + "k3s" + ], + "targetResourceKinds": [ + "Pod", + "Service" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/k3s-io/k3s/pull/4725", + "sourceRepo": "k3s-io/k3s", + "reactions": 1, + "comments": 17 + }, + "security": { + "scannedAt": "2026-02-27T17:49:05.700Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/k3s/k3s-5793-enable-full-offline-source-build.json b/solutions/cncf-generated/k3s/k3s-5793-enable-full-offline-source-build.json new file mode 100644 index 00000000..747e0428 --- /dev/null +++ b/solutions/cncf-generated/k3s/k3s-5793-enable-full-offline-source-build.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:04.322Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "k3s: Enable full offline source build", + "description": "#### Proposed Changes ####\n\nCurrently it is not possible to build k3s from source due to the build scripts mixing the stages of download and build.\n\n#### Types of Changes ####\n\nI would consider this a bugfix as this was possible in the 
past.\n\n#### Verification ####\n\nThe build in the OBS (which doesn't allow network access) succeeds: https://build.opensuse.org/package/show/home:SchoolGuy:branches:devel:kubic/k3s?expand=0\n\n#### Testing ####\n\nNo we don't have testing but a CI/CD pipeline could be added via the OBS CI/CD Beta features.\n\n#### Linked Issues ####\n\nFixes #5618\n\n#### User-Facing Change ####\n```release-note\nNone\n```\n\n#### Further Comments ####\n\nThis PR is atm a WIP. I opened it to provide a place for discussion of my current implementation. I haven't yet figured out how I can slim down the source tarball. Because it is so huge I wasn't able to upload it to OBS in a single file. I bet that I could skip some parts of the git history to safe space.\n\nOpen points include but are not ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "We don't really want to encourage Linux distros to build and package K3s in their own build systems. We support running K3s from the binaries that are released here on Github. If folks start building and offering their own versions of K3s, those are essentially other Kubernetes distros downstream from K3s. \n\nWe already had problems with one distro in particular stripping out the bundled containerd and userspace, resulting in a version of K3s that did not behave at all like our shipping releases. 
I know that's not what you're attempting to do here, but we want to take a firm line on our support boundary.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "k3s", + "sandbox", + "orchestration" + ], + "category": "troubleshooting", + "cncfProjects": [ + "k3s" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/k3s-io/k3s/pull/5793", + "sourceRepo": "k3s-io/k3s", + "reactions": 2, + "comments": 21 + }, + "security": { + "scannedAt": "2026-02-27T17:49:04.322Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/k8gb/k8gb-1064-helm-chart-updated-for-externaldns-rfc2136-provider-in-order-to-suppor.json b/solutions/cncf-generated/k8gb/k8gb-1064-helm-chart-updated-for-externaldns-rfc2136-provider-in-order-to-suppor.json new file mode 100644 index 00000000..b9dbc366 --- /dev/null +++ b/solutions/cncf-generated/k8gb/k8gb-1064-helm-chart-updated-for-externaldns-rfc2136-provider-in-order-to-suppor.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:11.358Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "k8gb: Helm chart updated for ExternalDNS, rfc2136 provider, in order to support GSS-TSIG authentication configuration.", + "description": "Changed helm template for external-dns as well as the values.yaml and values.schema.json to support several authentication types in the provider RFC2136.\n\nThe previous version only supported RFC2136 configuration for TSIG, which doesn't work with Windows DNS.\n\nFixes https://github.com/k8gb-io/k8gb/issues/1061 \nFixes https://github.com/k8gb-io/k8gb/issues/929", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "@v-esteves Is it fully superseded by #1065 ?", + "steps": [ + "Review the issue 
discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "panic: test timed out after 15m0s\r\n\r\ngoroutine 2511 [running]:\r\ntesting.(*M).startAlarm.func1()\r\n/opt/hostedtoolcache/go/1.19.1/x64/src/testing/testing.go:2036 +0x8e\r\ncreated by time.goFunc\r\n/opt/hostedtoolcache/go/1.19.1/x64/src/time/sleep.go:176 +0x32" + ] + } + }, + "metadata": { + "tags": [ + "k8gb", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "k8gb" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/k8gb-io/k8gb/pull/1064", + "sourceRepo": "k8gb-io/k8gb", + "reactions": 2, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:48:11.358Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/k8sgpt/k8sgpt-269-feat-running-local-models.json b/solutions/cncf-generated/k8sgpt/k8sgpt-269-feat-running-local-models.json new file mode 100644 index 00000000..204e2def --- /dev/null +++ b/solutions/cncf-generated/k8sgpt/k8sgpt-269-feat-running-local-models.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:13.692Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "k8sgpt: feat: running local models", + "description": "~~This is an untested draft still~~ It works! see my comment below. This should be enough for testing on an OpenAI compatible endpoint just by letting the user change the base_url, so should work as well with https://github.com/go-skynet/LocalAI\n\nI'll experiment a bit locally and refine this, adding docs too. 
cc: @arbreezy @AlexsJones \n\nCloses #188 \n\n## 📑 Description\n\n## ✅ Checks\n\n- [x] My pull request adheres to the code style of this project\n- [x] My code requires changes to the documentation\n- [ ] I have updated the documentation as required\n- [ ] All the tests have passed\n\n## ℹ Additional Information", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Looks very exciting @mudler , @matthisholleville @arbreezy perhaps we use this as another reason to have a configuration Object for AI rather than passing more strings. Also @mudler I think having your own AI struct might be easier ( yes its a little more work, but it eventually means less manipulation of the openai.go)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Service has not ready endpoints, pods: [Pod/llama-69b7785db9-rlv9g], expected 1", + "kubectl apply -f https://raw.githubusercontent.com/Homebrew/kubeadm/main/examples/deploy-pod.yaml", + "kubectl apply -f https://raw.githubusercontent.com/Homebrew/kubeadm/main/examples/update-endpoints.yaml" + ] + } + }, + "metadata": { + "tags": [ + "k8sgpt", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "k8sgpt" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/k8sgpt-ai/k8sgpt/pull/269", + "sourceRepo": "k8sgpt-ai/k8sgpt", + "reactions": 3, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:48:13.692Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/k8sgpt/k8sgpt-318-feat-add-helm-chart.json b/solutions/cncf-generated/k8sgpt/k8sgpt-318-feat-add-helm-chart.json new file mode 100644 index 00000000..222aacc0 --- /dev/null +++ b/solutions/cncf-generated/k8sgpt/k8sgpt-318-feat-add-helm-chart.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": 
"2026-02-27T17:48:15.432Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "k8sgpt: feat: add helm chart", + "description": "## 📑 Description\nReplace static manifests with a Helm chart & update Makefile accordingly\n\nCloses #307", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "#### What this PR does / why we need it:\n\nHelm chart for: [k8sgpt](https://github.com/k8sgpt-ai/k8sgpt)\n\n#### Which issue this PR fixes\n*(optional, in `fixes #(, fixes #, ...)` format, will close that issue when PR gets merged)*\n - fixes #\n\n#### Special notes for your reviewer:\n\n#### Checklist\n\n- [x] Title of the PR starts with chart name (e.g. `[portefaix-kyverno]`)\n- [x] Documentation has been updated with helm-docs (run: `.github/helm-docs.sh`)\n- [x] Chart Version bumped\n- [x] `ChangeLog.md` has beed updated\n- [x] Variables are documented in the `README.md`", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "apiVersion: apps/v1\r\nkind: Deployment\r\nmetadata:\r\n name: k8sgpt\r\n namespace: \"default\"\r\n labels:\r\n helm.sh/chart: k8sgpt-1.0.0\r\n app.kubernetes.io/name: k8sgpt\r\n app.kubernetes.io/instance: k8sgpt\r\n app.kubernetes.io/managed-by: Helm\r\n app.kubernetes.io/version: \"0.2.4\"\r\n app.k8sgpt.image: \"ghcr.io/k8sgpt-ai/k8sgpt:v0.2.4\"", + "> apiVersion: apps/v1\r\n> kind: Deployment\r\n> metadata:\r\n> name: k8sgpt\r\n> namespace: \"default\"\r\n> labels:\r\n> helm.sh/chart: k8sgpt-1.0.0\r\n> app.kubernetes.io/name: k8sgpt\r\n> app.kubernetes.io/instance: k8sgpt\r\n> app.kubernetes.io/managed-by: Helm\r\n> app.kubernetes.io/version: \"0.2.4\"\r\n> app.k8sgpt.image: \"ghcr.io/k8sgpt-ai/k8sgpt:v0.2.4\"\r\n>" + ] + } + }, + "metadata": { + "tags": [ + "k8sgpt", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "k8sgpt" + ], + "targetResourceKinds": 
[], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/k8sgpt-ai/k8sgpt/pull/318", + "sourceRepo": "k8sgpt-ai/k8sgpt", + "reactions": 1, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:48:15.432Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kagent/kagent-1136-allow-cross-namespace-tool-references.json b/solutions/cncf-generated/kagent/kagent-1136-allow-cross-namespace-tool-references.json new file mode 100644 index 00000000..a7bef651 --- /dev/null +++ b/solutions/cncf-generated/kagent/kagent-1136-allow-cross-namespace-tool-references.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:19.061Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kagent: Allow cross-namespace tool references", + "description": "Opening in draft for now so I can get some :eyes: on this and hopefully start a discussion as to how we can get this feature in place. The code as it stands now, works, based on my local testing. However, I understand that there may be some security concerns here - particularly since namespaces are often used for tenant isolation. Do these need to be addressed now? And if so, does anyone got any thoughts on ways to tackle this? I would personally look towards Gateway API for inspiration if needed.\n\nCloses #841", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Hey @onematchfox, thanks for opening this. The reason we don't support this today is that tools, specifically `RemoteMCPServer`, have access to secrets which are loaded into the agent. Headers from secrets is the main one that I can think of right now. \n\nI agree that we should look towards the Gateway API for solutions to namespace isolation. 
In particular we could use the [`ReferenceGrant`](https://gateway-api.sigs.k8s.io/api-types/referencegrant/) or a similar mechanism. \n\nI definitely don't want to create too much friction, but I want to make sure the API is secure by default.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "apiVersion: kagent.dev/v1alpha2\r\nkind: RemoteMCPServer\r\nmetadata:\r\n name: kagent-tool-server\r\n namespace: kagent\r\nspec:\r\n description: Official KAgent tool server\r\n protocol: STREAMABLE_HTTP\r\n sseReadTimeout: 5m0s\r\n terminateOnClose: true\r\n timeout: 30s\r\n url: http://kagent-tools.kagent:8084/mcp\r\n allowedAgents:\r\n namespaces:\r\n from: All" + ] + } + }, + "metadata": { + "tags": [ + "kagent", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "kagent" + ], + "targetResourceKinds": [ + "Namespace" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kagent-dev/kagent/pull/1136", + "sourceRepo": "kagent-dev/kagent", + "reactions": 1, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:48:19.061Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kagent/kagent-322-fix-ui-add-support-for-ollama-model-tags-in-the-ui.json b/solutions/cncf-generated/kagent/kagent-322-fix-ui-add-support-for-ollama-model-tags-in-the-ui.json new file mode 100644 index 00000000..25765f04 --- /dev/null +++ b/solutions/cncf-generated/kagent/kagent-322-fix-ui-add-support-for-ollama-model-tags-in-the-ui.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:20.173Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kagent: fix(ui): add support for Ollama model tags in the UI", + "description": "This PR /resolves #307 issue. 
It adds ability to specify custom model tags when creating or editing Ollama model configurations in the UI\n\nChanges:\n- Added a \"Model Tag\" input field that appears only when Ollama provider is selected\n- Default tag value is set to \"latest\" when not specified\n- Automatically includes the tag in the model name when it's different from \"latest\"\n- Sends the model name to the API in the format model:tag when a custom tag is specified\n- Stores the tag value in the model parameters for reference\n- Updates the auto-generated model resource name to include the tag for better identification\n- The tag input helps users specify which version of an Ollama model they want to use, making it easier to work with different versions of the same model.\n\nThe tag input helps users specify which version of an Ollama model they want to use, making it easier to work with different versions of the same model.\n\n> [!IMPORTANT]\n> This change currently **prevents updating** the model", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "another issue I noticed -- looks like the \"0.5b\" tag is applied twice to the name (I think this will be probably resolved once you switch to the method to create valid name).\n\n\"Screenshot", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "apiVersion: kagent.dev/v1alpha1\r\n kind: ModelConfig\r\n metadata:\r\n name: ollama-dolphin-mistral-70b\r\n namespace: kagent\r\n spec:\r\n apiKeySecretKey: \"\"\r\n apiKeySecretRef: \"\"\r\n model: dolphin-mistral:70b\r\n ollama: {}\r\n provider: Ollama" + ] + } + }, + "metadata": { + "tags": [ + "kagent", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "kagent" + ], + "targetResourceKinds": [ + "Secret", + "Namespace" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/kagent-dev/kagent/pull/322", + "sourceRepo": 
"kagent-dev/kagent", + "reactions": 0, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:48:20.173Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kairos/kairos-587-seedling-webui-installer.json b/solutions/cncf-generated/kairos/kairos-587-seedling-webui-installer.json new file mode 100644 index 00000000..50109830 --- /dev/null +++ b/solutions/cncf-generated/kairos/kairos-587-seedling-webui-installer.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:23.317Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kairos: :seedling: Webui installer", + "description": "**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\nFixes #97 \n![Peek 2023-01-04 01-04](https://user-images.githubusercontent.com/2420543/210461794-fb80ad90-5d11-479d-945d-2e3ba3890435.gif)\n\nStill WIP, but its already functional", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "@check-spelling-bot apply [updates](https://github.com/kairos-io/kairos/actions/runs/3806058276/attempts/1).", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "^\\Q/tmp/check-spelling/kairos-io/kairos/pull-request/587/summary.txt\\E$", + ".github/actions/spelling/excludes.txt", + "
\n\n\nTo have the bot do this for you, reply quoting the following line:\n@check-spelling-bot apply [updates](https://github.com/kairos-io/kairos/actions/runs/3782713804/attempts/1).\n\n\n
Available :books: dictionaries could cover words not in the :blue_book: dictionary\n\nThis includes both **expected items** (639) from .github/actions/spelling/expect.txt and **unrecognized words** (1)\n\nDictionary | Entries | Covers\n-|-|-\n[cspell:cpp/src/cpp.txt](https://raw.githubusercontent.com/check-spelling/cspell-dicts/v20220816/dictionaries/cpp/src/cpp.txt)|30216|119|\n[cspell:software-terms/src/software-terms.txt](https://raw.githubusercontent.com/check-spelling/cspell-dicts/v20220816/dictionaries/software-terms/src/software-terms.txt)|1237|104|\n[cspell:node/node.txt](https://raw.githubusercontent.com/check-spelling/cspell-dicts/v20220816/dictionaries/node/node.txt)|1768|43|\n[cspell:python/src/python/python-lib.txt](https://raw.githubusercontent.com/check-spelling/cspell-dicts/v20220816/dictionaries/python/src/python/python-lib.txt)|3873|39|\n[cspell:php/php.txt](https://raw.githubusercontent.com/check-spelling/cspell-dicts/v20220816/dictionaries/php/php.txt)|2597|36|\n[cspell:aws/aws.txt](https://raw.githubusercontent.com/check-spelling/cspell-dicts/v20220816/dictionaries/aws/aws.txt)|218|24|\n[cspell:npm/npm.txt](https://raw.githubusercontent.com/check-spelling/cspell-dicts/v20220816/dictionaries/npm/npm.txt)|288|23|\n[cspell:typescript/typescript.txt](https://raw.githubusercontent.com/check-spelling/cspell-dicts/v20220816/dictionaries/typescript/typescript.txt)|1211|19|\n[cspell:node/src/node-old.txt](https://raw.githubusercontent.com/check-spelling/cspell-dicts/v20220816/dictionaries/node/src/node-old.txt)|730|18|\n[cspell:win32/src/win32.txt](https://raw.githubusercontent.com/check-spelling/cspell-dicts/v20220816/dictionaries/win32/src/win32.txt)|53509|17|\n\nConsider adding them using (in `.github/workflows/spelling.yml`):" + ] + } + }, + "metadata": { + "tags": [ + "kairos", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "kairos" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + 
"sourceIssue": "https://github.com/kairos-io/kairos/pull/587", + "sourceRepo": "kairos-io/kairos", + "reactions": 1, + "comments": 29 + }, + "security": { + "scannedAt": "2026-02-27T17:48:23.317Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kairos/kairos-816-penguin-add-ubuntu-arm-rpi-flavor.json b/solutions/cncf-generated/kairos/kairos-816-penguin-add-ubuntu-arm-rpi-flavor.json new file mode 100644 index 00000000..fd9b5761 --- /dev/null +++ b/solutions/cncf-generated/kairos/kairos-816-penguin-add-ubuntu-arm-rpi-flavor.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:22.364Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kairos: :penguin: Add ubuntu arm rpi flavor", + "description": "**What this PR does / why we need it**:\nUbuntu flavors which support raspberrypi\n\n**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\nFixes #645", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Hey @c0ffee thanks! that's looking good at a first glance! I didn't had a deeper look on it yet, but will do as soon as possible - we need also to create the repository for the flavors as well before merging it", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + ":mega: We’re building smart automated test selection to slash your CI/CD build times. [Learn more](https://about.codecov.io/iterative-testing/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=kairos-io)\n\nHey @c0ffee thanks! that's looking good at a first glance! 
I didn't had a deeper look on it yet, but will do as soon as possible - we need also to create the repository for the flavors as well before merging it\n@mudler yeap the repos have to exist good point. so far it is booting on a rpi4, but would be good if someone with deeper knowledge of kairos can have a look :)\r\n\r\nthe main point is the size, because of the uncompressed firmwares the 2gb standard size for rpi img wont be enough, how should we handle this? simple increase the limit for all images?\n> @mudler yeap the repos have to exist good point. so far it is booting on a rpi4, but would be good if someone with deeper knowledge of kairos can have a look :)\n> \n> the main point is the size, because of the uncompressed firmwares the 2gb standard size for rpi img wont be enough, how should we handle this? simple increase the limit for all images?\n\nVery nice!\n\n\nMaybe we can compress those Fw files somehow? If that is the main difference maybe we can get away with it.\nhttps://bugs.launchpad.net/ubuntu/+source/linux-firmware/+bug/1942260\r\n\r\nnot much movement there, maybe all are waiting for the zstd support in the kernel, but nevertheless this wouldnt help us on the lts releases :)\nI've created the quay repository for the images (core and k3s):\r\n\r\n- kairos-ubuntu-arm-rpi\r\n- core-ubuntu-arm-rpi\r\n- core-ubuntu-20-lts-arm-rpi\r\n- kairos-ubuntu-20-lts-arm-rpi\r\n- core-ubuntu-22-lts-arm-rpi\r\n- kairos-ubuntu-22-lts-arm-rpi\nJust a note: this is still a \"draft\" PR, although I think it's probably ready for review now :)\n> Just a note: this is still a \"draft\" PR, although I think it's probably ready for review now :)\n\nYes I have to build all 3 images again and try it again on hardware, this is why I didn't change the status yet :)\nTried to build these for testing, but I keep hitting this error:", + "https://github.com/mauromorales/kairos/actions/runs/4331798729/jobs/7563867150#step:8:6223\n@mauromorales, I think the relevant error from that 
log is a bit further up:" + ] + } + }, + "metadata": { + "tags": [ + "kairos", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "kairos" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kairos-io/kairos/pull/816", + "sourceRepo": "kairos-io/kairos", + "reactions": 2, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:48:22.364Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kaito/kaito-1477-feat-refactor-workspace-controller-to-support-nodeestimator-and-works.json b/solutions/cncf-generated/kaito/kaito-1477-feat-refactor-workspace-controller-to-support-nodeestimator-and-works.json new file mode 100644 index 00000000..7db52d2c --- /dev/null +++ b/solutions/cncf-generated/kaito/kaito-1477-feat-refactor-workspace-controller-to-support-nodeestimator-and-works.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:25.568Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kaito: feat: refactor workspace controller to support NodeEstimator and Workspace.Status.TargetNodeCount", + "description": "**Reason for Change**:\n\n1. Refactor Workspace controller reconcile as follows:\n\n- Update Workspace TargetNodeCount(UpdateWorkspaceTargetNodeCount())\n ⬇️\n- CheckNodeClaims(nodeClaimManager.CheckNodeClaims())\n ⬇️\n- CreateNodeClaims(nodeClaimManager.CreateNodeClaims())\n ⬇️\n- AreNodeClaimsReady(nodeClaimManager.AreNodeClaimsReady())\n ⬇️\n- AreNodePluginsReady(nodeResourceManager.AreNodePluginsReady())\n ⬇️\n- UpdateWorkerNodesInStatus(nodeResourceManager.UpdateWorkerNodesInStatus())\n ⬇️\n- Use `workspace.Status.TargetNodeCount` to configure underlay workload replicas.\n\n2. 
revert `v1beta1.Workspace` CRD: \n - remove `workspace.Inference.Replicas` and `workspace.Status.Inference`\n - add `workspace.Status.TargetNodeCount`\n\n3. remove scale subresource api of `Workspace` CRD, and a new CRD named InferenceSet will be added for supporting scale up/down inference workload.\n\n**Requirements**\n\n- [x] added unit tests and e2e tests (if applicable).\n\n**Issue Fixed**:\n\nFixes #1322", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/hold", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kaito", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "kaito" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/kaito-project/kaito/pull/1477", + "sourceRepo": "kaito-project/kaito", + "reactions": 0, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:48:25.568Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/karmada/karmada-1094-kubectl-karmada-add-init-command.json b/solutions/cncf-generated/karmada/karmada-1094-kubectl-karmada-add-init-command.json new file mode 100644 index 00000000..953e4709 --- /dev/null +++ b/solutions/cncf-generated/karmada/karmada-1094-kubectl-karmada-add-init-command.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:05.810Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "karmada: kubectl-karmada add `init` Command", + "description": "**What type of PR is this?**\n/kind design\n\n**What this PR does / why we need it**:\neasy and quick install of karmada\n**Which issue(s) this PR fixes**:\nFixes #804\n\n**Special notes for your reviewer**:\nkubectl-karmada add `init` Command\n```\n# ./kubectl-karmada\nkubectl 
karmada controls a Kubernetes Cluster Federation.\n\nUsage:\n karmada [flags]\n karmada [command]\n\nAvailable Commands:\n completion generate the autocompletion script for the specified shell\n cordon Mark cluster as unschedulable\n get Display one or many resources\n help Help about any command\n init bootstrap install karmada (default in kubernetes)\n join Register a cluster to control plane\n taint Update the taints on one or more clusters.\n uncordon Mark cluster as schedulable\n unjoin Remove the registration of a cluster from control plane\n version Print the version information.\n```\n**Does this PR introduce a user-facing change?**:\n\n```release-note\n\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Hi @RainbowMango\nI added a sub-command to kubectl-karmada. If there is no problem, the new sub-commands will also be added in this form.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "# ./kubectl-karmada\r\nkubectl karmada controls a Kubernetes Cluster Federation.\r\n\r\nUsage:\r\n karmada [flags]\r\n karmada [command]\r\n\r\nAvailable Commands:\r\n completion generate the autocompletion script for the specified shell\r\n cordon Mark cluster as unschedulable\r\n get Display one or many resources\r\n help Help about any command\r\n init bootstrap install karmada (default in kubernetes)\r\n join Register a cluster to control plane\r\n taint Update the taints on one or more clusters.\r\n uncordon Mark cluster as schedulable\r\n unjoin Remove the registration of a cluster from control plane\r\n version Print the version information.", + "Welcome @prodanlabs! It looks like this is your first PR to karmada-io/karmada 🎉\nHi @RainbowMango\r\nI added a sub-command to kubectl-karmada. If there is no problem, the new sub-commands will also be added in this form.\nIt looks good. I was interested in this feature in #804. 
Thank you for implementing it.\r\n\r\nI have considered whether we can read the crd and other object from local file while compiling it, rather than setting it a constant like `pkg/kaadm/karmada/crds/bases_cluster_karmada_io_clusters.go`, because we possibly update these object, which could increase code maintenance costs. What do you think?\r\n\n@lonelyCZ Good idea. I'm still working on the final work of `karmada in container`, let me see if I have time later\nHi @RainbowMango trouble re-trigger `ci`\nDone. Sorry for the delay.\n\r\n\r\n\r\n\r\n> Done. Sorry for the delay.\r\n\r\nUnfortunately, it needs to be repeated again.\r\nThanks community, let me regain the golang specification :)\nHello, @prodanlabs , perhaps you can activate ci test in your github fork [github action](https://github.com/prodanlabs/karmada/actions).\r\n\r\nAlso, please squash your commits to one, and sign your commit according to [https://github.com/karmada-io/karmada/pull/1094/checks?check_run_id=4498109394](https://github.com/karmada-io/karmada/pull/1094/checks?check_run_id=4498109394)\nthank @lonelyCZ @RainbowMango , I have benefited a lot today.\r\n\r\nSubmit PR after passing the next inspection.\n> It looks good. I was interested in this feature in #804. Thank you for implementing it.\r\n\r\nYeah. 
I added `Fixes #804` to this PR description.\nI run it locally and looks great, sharing the steps here:" + ] + } + }, + "metadata": { + "tags": [ + "karmada", + "incubating", + "orchestration", + "approved", + "size-xxl", + "lgtm", + "kind-design" + ], + "category": "troubleshooting", + "cncfProjects": [ + "karmada" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/karmada-io/karmada/pull/1094", + "sourceRepo": "karmada-io/karmada", + "reactions": 2, + "comments": 16 + }, + "security": { + "scannedAt": "2026-02-27T17:44:05.810Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/karmada/karmada-1149-implementing-the-revisereplica-hook.json b/solutions/cncf-generated/karmada/karmada-1149-implementing-the-revisereplica-hook.json new file mode 100644 index 00000000..01f8863e --- /dev/null +++ b/solutions/cncf-generated/karmada/karmada-1149-implementing-the-revisereplica-hook.json @@ -0,0 +1,55 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:08.449Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "karmada: Implementing the ReviseReplica hook", + "description": "Signed-off-by: Xinzhao Xu \n\n**What type of PR is this?**\n\n/kind feature\n\n**What this PR does / why we need it**:\n\nThis patch implements the `ReviseReplica` hook, check [resource interpreter webhook proposal](https://github.com/karmada-io/karmada/tree/master/docs/proposals/resource-interpreter-webhook) for more info.\n\nThe original code to modify the replica of `Deployment` and `Job` has been changed to use default interpreters, for custom resources, users only need to implement the corresponding webhook and return the patch that updates the replica, take our `workload` resource for example:\n\n```go\nfunc (e *workloadInterpreter) responseWithExploreReviseReplica(workload *workloadv1alpha1.Workload, req 
interpreter.Request) interpreter.Response {\n\twantedWorkload := workload.DeepCopy()\n\t// update the replica\n\twantedWorkload.Spec.Replicas = req.DesiredReplicas\n\tmarshaledBytes, err := json.Marshal(wantedWorkload)\n\tif err != nil {\n\t\treturn interpreter.Errored(http.StatusInt", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Signed-off-by: kerthcet \n\n**What type of PR is this?**\n/kind feature\n\n**What this PR does / why we need it**:\nfix cyclomatic complexity\n\n**Which issue(s) this PR fixes**:\nFixes #\n\n**Special notes for your reviewer**:\n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\nNONE\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "I've tested it locally with [the workload example](https://github.com/karmada-io/karmada/tree/master/examples/customresourceinterpreter), everything works as excepted, my test files:\r\n\r\nworkload:", + "propagation policy:", + "And in the cluster member1, the workload detail:" + ] + } + }, + "metadata": { + "tags": [ + "karmada", + "incubating", + "orchestration", + "kind-feature", + "approved", + "size-l", + "lgtm" + ], + "category": "troubleshooting", + "cncfProjects": [ + "karmada" + ], + "targetResourceKinds": [ + "Deployment", + "Job", + "Namespace" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/karmada-io/karmada/pull/1149", + "sourceRepo": "karmada-io/karmada", + "reactions": 2, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:44:08.449Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/karmada/karmada-1682-add-update-lifted-and-verify-lifted.json b/solutions/cncf-generated/karmada/karmada-1682-add-update-lifted-and-verify-lifted.json new file mode 100644 index 00000000..054c6bba --- /dev/null +++ 
b/solutions/cncf-generated/karmada/karmada-1682-add-update-lifted-and-verify-lifted.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:04.812Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "karmada: add update-lifted and verify-lifted", + "description": "Signed-off-by: yingjinhui \n\n**What type of PR is this?**\n/kind feature\n\n**What this PR does / why we need it**:\nUpdate and verify `lifted/doc.go` automatically.\n\n**Which issue(s) this PR fixes**:\nFixes #1652\n\n**Special notes for your reviewer**:\n\n**Does this PR introduce a user-facing change?**:\n```release-note\nNONE\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@XiShanYongYe-Chang @mrlihanbo @lonelyCZ", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "karmada", + "incubating", + "orchestration", + "kind-feature", + "approved", + "size-xxl", + "lgtm" + ], + "category": "troubleshooting", + "cncfProjects": [ + "karmada" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/karmada-io/karmada/pull/1682", + "sourceRepo": "karmada-io/karmada", + "reactions": 2, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:44:04.812Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/karmada/karmada-2134-add-karmadactl-addons-subcommand.json b/solutions/cncf-generated/karmada/karmada-2134-add-karmadactl-addons-subcommand.json new file mode 100644 index 00000000..4487d9ee --- /dev/null +++ b/solutions/cncf-generated/karmada/karmada-2134-add-karmadactl-addons-subcommand.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:54.137Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", 
+ "mission": { + "title": "karmada: Add karmadactl addons subcommand", + "description": "Co-authored-by: duanmeng \nSigned-off-by: wuyingjun \n\n**What type of PR is this?**\n/kind feature\n\n**What this PR does / why we need it**:\nAdd karmadactl addons subcommand\n**Which issue(s) this PR fixes**:\nFixes https://github.com/karmada-io/karmada/issues/1957\n\n**Special notes for your reviewer**:\n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\nNONE\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "**_-------------------------------------------------latest test result-------------------------------------------------_**\n\n![image](https://user-images.githubusercontent.com/16109961/181877613-dc849d02-7f5f-43be-9f62-f93ae071628f.png)\n![image](https://user-images.githubusercontent.com/16109961/181877621-96eed658-8915-47e5-aa49-3ecac7b161ed.png)\n![image](https://user-images.githubusercontent.com/16109961/181877651-102483f3-ca64-486e-83e0-76ede2c05df1.png)\n![image](https://user-images.githubusercontent.com/16109961/181877670-973880e6-de47-4bed-982e-852640518a2a.png)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "**_-------------------------------------------------latest test result-------------------------------------------------_**\r\n\r\n![image](https://user-images.githubusercontent.com/16109961/181877613-dc849d02-7f5f-43be-9f62-f93ae071628f.png)\r\n![image](https://user-images.githubusercontent.com/16109961/181877621-96eed658-8915-47e5-aa49-3ecac7b161ed.png)\r\n![image](https://user-images.githubusercontent.com/16109961/181877651-102483f3-ca64-486e-83e0-76ede2c05df1.png)\r\n![image](https://user-images.githubusercontent.com/16109961/181877670-973880e6-de47-4bed-982e-852640518a2a.png)\r\n\r\n\n/assign @lonelyCZ \r\n/assign @prodanlabs \nThanks for your hard work. 
I will review it ASAP.\n> Thanks for your contributions! @wuyingjun-lucky @duanmengkk\r\n> \r\n> I just tested it that was very cool. I think we can start to review it, this is fisrt round.\r\n\r\nHelp to review again \n@lonelyCZ help to review again\nReady to be reviewed @lonelyCZ \nIt seems not to view options", + "> It seems not to view options\r\n> \r\n>", + "Hi, @lonelyCZ \r\n\r\nThe `options` seems like a sub command introduced by [karmadactl](https://github.com/karmada-io/karmada/blob/master/pkg/karmadactl/karmadactl.go#:~:text=filters%20%3A%3D%20%5B%5D,groups...).\r\n\"image\"\r\n\"image\"\r\nand it works for showing global command. \r\nUsing `karmadactl join -h` or other subcommand will show the same information\r\n\"image\"\r\n\"image\"\r\n\r\n\r\nDo you think is it ok to use a new pr or issue to trace the `options command` ?\r\n\r\n\nNice finding!\r\n\r\n> Do you think is it ok to use a new pr or issue to trace the options command ?\r\n\r\nYes, it is ok. I found it didn't work for subcommand." 
+ ] + } + }, + "metadata": { + "tags": [ + "karmada", + "incubating", + "orchestration", + "kind-feature", + "approved", + "size-xxl", + "lgtm" + ], + "category": "troubleshooting", + "cncfProjects": [ + "karmada" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/karmada-io/karmada/pull/2134", + "sourceRepo": "karmada-io/karmada", + "reactions": 6, + "comments": 30 + }, + "security": { + "scannedAt": "2026-02-27T17:43:54.137Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/karmada/karmada-3448-support-rerun-github-failed-workflow.json b/solutions/cncf-generated/karmada/karmada-3448-support-rerun-github-failed-workflow.json new file mode 100644 index 00000000..6f068712 --- /dev/null +++ b/solutions/cncf-generated/karmada/karmada-3448-support-rerun-github-failed-workflow.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:00.307Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "karmada: Support rerun github failed workflow", + "description": "**What type of PR is this?**\n\n/kind feature\n\n**What this PR does / why we need it**:\n\nCreate a issue comment of include \"/retest\" to rerun workflow when CI failed.\n\n**Which issue(s) this PR fixes**:\nFixes #3421\n\n**Special notes for your reviewer**:\n\nThis event will only trigger a workflow run if the workflow file is on the default branch.\n\nSo you can comment at [this PR](https://github.com/liangyuanpeng/karmada/pull/8) for check what the happen when we create a issue comment of \"/retest\" on the PR.(Yes, rerun the failed workflow.)\n\nAlso run `hack/ghaction.sh` on the local, Just like:\n\n```shell\nlan@lan:~/repo/git/karmada/hack$ GH_TOKEN=xxx ISSUE_COMMENT=\"hello\\r\\n/retest\" PR_NUM=8 REPO=liangyuanpeng/karmada ./ghaction.sh\nMatching /retest and rerun workflow...\nReruning 
workflow...\nPR:test retest github action \nURL:https://github.com/liangyuanpeng/karmada/pull/8 \nID:4763218620 \nWorkflowRunName:CLI \n===============================\n✓ Requested rerun of run 4763218620\nReruning workflow...", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "**What type of PR is this?**\n/kind bug\n\n**What this PR does / why we need it**:\n\n**Which issue(s) this PR fixes**:\nFixes #3525\nFixes #3572\n\n**Special notes for your reviewer**:\n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\nkarmada-controller-manager: Retain fields that do not support updates of pod.\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "lan@lan:~/repo/git/karmada/hack$ GH_TOKEN=xxx ISSUE_COMMENT=\"hello\\r\\n/retest\" PR_NUM=8 REPO=liangyuanpeng/karmada ./ghaction.sh\r\nMatching /retest and rerun workflow...\r\nReruning workflow...\r\nPR:test retest github action \r\nURL:https://github.com/liangyuanpeng/karmada/pull/8 \r\nID:4763218620 \r\nWorkflowRunName:CLI \r\n===============================\r\n✓ Requested rerun of run 4763218620\r\nReruning workflow...\r\nPR:test retest github action \r\nURL:https://github.com/liangyuanpeng/karmada/pull/8 \r\nID:4763218619 \r\nWorkflowRunName:CI Workflow \r\n===============================\r\n✓ Requested rerun of run 4763218619\r\nReruning workflow...\r\nPR:test retest github action \r\nURL:https://github.com/liangyuanpeng/karmada/pull/8 \r\nID:4763218327 \r\nWorkflowRunName:CLI \r\n===============================\r\n✓ Requested rerun of run 4763218327\r\nReruning workflow...\r\nPR:test retest github action \r\nURL:https://github.com/liangyuanpeng/karmada/pull/8 \r\nID:4763218326 \r\nWorkflowRunName:CI Workflow \r\n===============================\r\n✓ Requested rerun of run 4763218326", + "**What type of PR is this?**\r\n/kind bug\r\n\r\n\r\n**What this PR does / why we need 
it**:\r\n\r\n**Which issue(s) this PR fixes**:\r\nFixes #3525\r\nFixes #3572\r\n\r\n**Special notes for your reviewer**:\r\n\r\n**Does this PR introduce a user-facing change?**:\r\n", + "cc @XiShanYongYe-Chang @jwcesign Could you please help to take a look?\n/retest\nHi @liangyuanpeng, does `/retest` comment rerun all jobs? `/retest-failed` comment runs only the jobs that fail?\n> does /retest comment rerun all jobs? /retest-failed comment runs only the jobs that fail?\r\n\r\nMake sense, i will change it to `/retest-faile` and working for `/retest` on the next PR.\n[APPROVALNOTIFIER] This PR is **NOT APPROVED**\n\nThis pull-request has been approved by:\nTo complete the [pull request process](https://git.k8s.io/community/contributors/guide/owners.md#the-code-review-process), please ask for approval from **rainbowmango** after the PR has been reviewed.\n\nThe full list of commands accepted by this bot can be found [here](https://go.k8s.io/bot-commands?repo=karmada-io%2Fkarmada).\n\n
\nNeeds approval from an approver in each of these files:\n\n- **[OWNERS](https://github.com/karmada-io/karmada/blob/master/OWNERS)**\n\nApprovers can indicate their approval by writing `/approve` in a comment\nApprovers can cancel approval by writing `/approve cancel` in a comment\n
\n\n## [Codecov](https://app.codecov.io/gh/karmada-io/karmada/pull/3448?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) Report\n> Merging [#3448](https://app.codecov.io/gh/karmada-io/karmada/pull/3448?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) (c87c6cd) into [master](https://app.codecov.io/gh/karmada-io/karmada/commit/2be4b33d01db39ebcbb1065920e0086bbd7841db?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) (2be4b33) will **increase** coverage by `0.94%`.\n> The diff coverage is `n/a`.\n\n:exclamation: Your organization is not using the GitHub App Integration. As a result you may experience degraded service beginning May 15th. Please [install the Github App Integration](https://github.com/apps/codecov) for your organization. [Read more](https://about.codecov.io/blog/codecov-is-updating-its-github-integration/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io)." 
+ ] + } + }, + "metadata": { + "tags": [ + "karmada", + "incubating", + "orchestration", + "kind-feature", + "size-l", + "lgtm" + ], + "category": "troubleshooting", + "cncfProjects": [ + "karmada" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/karmada-io/karmada/pull/3448", + "sourceRepo": "karmada-io/karmada", + "reactions": 3, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:44:00.307Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/karmada/karmada-4534-mvp-add-resourcequota-plugin-in-scheduler-estimator-create-framewor.json b/solutions/cncf-generated/karmada/karmada-4534-mvp-add-resourcequota-plugin-in-scheduler-estimator-create-framewor.json new file mode 100644 index 00000000..04e27f63 --- /dev/null +++ b/solutions/cncf-generated/karmada/karmada-4534-mvp-add-resourcequota-plugin-in-scheduler-estimator-create-framewor.json @@ -0,0 +1,55 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:01.300Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "karmada: [MVP] add resourcequota plugin in scheduler-estimator: create framework for scheulder-estimator", + "description": "**What type of PR is this?**\nfeature\n\n**What this PR does / why we need it**:\n\n***why we need it***:\nThis PR is to resolve https://github.com/karmada-io/karmada/issues/4369\n\nIn our multi-tenant k8s clusters, we have namespace/ResourceQuota for each tenant duplicated on all clusters. \nFor example\n- cluster 1: namespace s-ns-1, resource quota hard: limits.cpu 40, limits.memory: 512Gi, requests.cpu 40, requests.memory: 512Gi\n- cluster 2: namespace s-ns-1, resource quota hard: limits.cpu 40, limits.memory: 512Gi, requests.cpu 40, requests.memory: 512Gi\n\nWe want to use Karmada to manage cluster 1 and cluster 2. 
When users submit jobs from karmada, it should take ResourceQuota into account during the scheduling process. In a scenario where clients uses the entire quota from \"s-ns-1\" in Cluster 1 but still have sufficient quota to execute jobs in Cluster 2, karmada should filter out cluster 1 and pick cluster 2 as the target cluster.\n\n*** what this PR does***:\nIn this PR we\n- Introduce the p", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Thank you for your important commit ! Besides, you can mark above multiple comments as `resolved`~\n\n/LGTM", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "// ReplicaRequirements represents the requirements required by each replica.\r\n type ReplicaRequirements struct {\r\n\t // NodeClaim represents the NodeAffinity, NodeSelector and Tolerations required by each replica.\r\n\t // +optional\r\n\t NodeClaim *NodeClaim `json:\"nodeClaim,omitempty\" protobuf:\"bytes,1,opt,name=nodeClaim\"`\r\n\t // ResourceRequest represents the resources required by each replica.\r\n\t // +optional\r\n\t ResourceRequest corev1.ResourceList `json:\"resourceRequest,omitempty\" protobuf:\"bytes,2,rep,name=resourceRequest,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName\"`\r\n\t // +optional\r\n\t Namespace string `json:\"namespace,omitempty\" protobuf:\"bytes,3,opt,name=namespace\"`\r\n\t // +optional\r\n\t PriorityClassName string `json:\"priorityClassName,omitempty\" protobuf:\"bytes,4,opt,name=priorityClassName\"`\r\n }", + "// ReplicaRequirements represents the requirements required by each replica.\r\n type ReplicaRequirements struct {\r\n\t // NodeClaim represents the node claim HardNodeAffinity, NodeSelector and Tolerations required by each replica.\r\n\t // +optional\r\n\t NodeClaim *NodeClaim `json:\"nodeClaim,omitempty\"`\r\n \r\n\t // ResourceRequest represents the resources required by each 
replica.\r\n\t // +optional\r\n\t ResourceRequest corev1.ResourceList `json:\"resourceRequest,omitempty\"`\r\n \r\n\t // Namespace represents the resources namespaces\r\n\t // +optional\r\n\t Namespace string `json:\"namespace,omitempty\"`\r\n \r\n\t // PriorityClassName represents the resources priorityClassName\r\n\t // +optional\r\n\t PriorityClassName string `json:\"priorityClassName,omitempty\"`\r\n }", + "Welcome @wengyao04! It looks like this is your first PR to karmada-io/karmada 🎉\n## [Codecov](https://app.codecov.io/gh/karmada-io/karmada/pull/4534?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) Report\nAttention: `29 lines` in your changes are missing coverage. Please review.\n> Comparison is base [(`e7192a9`)](https://app.codecov.io/gh/karmada-io/karmada/commit/e7192a9ecc7bd01f1ef103411bdb78a5cd8fcddc?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) 51.85% compared to head [(`85dd8db`)](https://app.codecov.io/gh/karmada-io/karmada/pull/4534?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) 51.89%.\n> Report is 20 commits behind head on master.\n\n| [Files](https://app.codecov.io/gh/karmada-io/karmada/pull/4534?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) | Patch % | Lines |\n|---|---|---|\n| [...kg/estimator/server/framework/runtime/framework.go](https://app.codecov.io/gh/karmada-io/karmada/pull/4534?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io#diff-cGtnL2VzdGltYXRvci9zZXJ2ZXIvZnJhbWV3b3JrL3J1bnRpbWUvZnJhbWV3b3JrLmdv) | 67.85% | [16 Missing and 2 partials :warning: 
](https://app.codecov.io/gh/karmada-io/karmada/pull/4534?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) |\n| [pkg/estimator/server/estimate.go](https://app.codecov.io/gh/karmada-io/karmada/pull/4534?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io#diff-cGtnL2VzdGltYXRvci9zZXJ2ZXIvZXN0aW1hdGUuZ28=) | 25.00% | [4 Missing and 2 partials :warning: ](https://app.codecov.io/gh/karmada-io/karmada/pull/4534?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) |\n| [pkg/estimator/server/server.go](https://app.codecov.io/gh/karmada-io/karmada/pull/4534?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io#diff-cGtnL2VzdGltYXRvci9zZXJ2ZXIvc2VydmVyLmdv) | 75.00% | [2 Missing and 1 partial :warning: ](https://app.codecov.io/gh/karmada-io/karmada/pull/4534?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) |\n| [cmd/scheduler-estimator/app/options/options.go](https://app.codecov.io/gh/karmada-io/karmada/pull/4534?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io#diff-Y21kL3NjaGVkdWxlci1lc3RpbWF0b3IvYXBwL29wdGlvbnMvb3B0aW9ucy5nbw==) | 0.00% | [2 Missing :warning: ](https://app.codecov.io/gh/karmada-io/karmada/pull/4534?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) |\n\n:exclamation: Your organization needs to install the [Codecov GitHub app](https://github.com/apps/codecov/installations/select_target) to enable full functionality.\n\n
Additional details and impacted files" + ] + } + }, + "metadata": { + "tags": [ + "karmada", + "incubating", + "orchestration", + "approved", + "size-xxl", + "lgtm", + "tide-merge-method-squash" + ], + "category": "troubleshooting", + "cncfProjects": [ + "karmada" + ], + "targetResourceKinds": [ + "Job", + "Namespace", + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/karmada-io/karmada/pull/4534", + "sourceRepo": "karmada-io/karmada", + "reactions": 3, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:44:01.300Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/karmada/karmada-4766-propagate-secret-of-type-kubernetes-io-service-account-token.json b/solutions/cncf-generated/karmada/karmada-4766-propagate-secret-of-type-kubernetes-io-service-account-token.json new file mode 100644 index 00000000..6f72d0ed --- /dev/null +++ b/solutions/cncf-generated/karmada/karmada-4766-propagate-secret-of-type-kubernetes-io-service-account-token.json @@ -0,0 +1,54 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:56.665Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "karmada: propagate `Secret` of type `kubernetes.io/service-account-token`", + "description": "**What type of PR is this?**\n\n/kind feature\n\n**What this PR does / why we need it**:\nPrior to kubernetes 1.24, Kubernetes controller-manager automatically created a Secret (with a long-lived token). Starting with 1.24, in order to create a Secret with long-lived token, you have to manually create a Secret and link it to the Service Account. 
Karmada currently doesn't support propagating this Secret as it is explicitly disabled.\n\n**Which issue(s) this PR fixes**:\nFixes #4752\n\n**Special notes for your reviewer**:\n\n**Does this PR introduce a user-facing change?**:\n\nYes\n```release-note\nkarmada-controller-manager: propagate `Secret` of type `kubernetes.io/service-account-token`\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "…pruners\n\n**What type of PR is this?**\n\n/kind cleanup\n\n**What this PR does / why we need it**:\nAllows for more pruners to be added and avoid lint errors such as `cyclomatic complexity` in https://github.com/karmada-io/karmada/pull/4766\n\n**Which issue(s) this PR fixes**:\nFixes #\n\n**Special notes for your reviewer**:\nSee PR https://github.com/karmada-io/karmada/pull/4766 and discussion with @XiShanYongYe-Chang \n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\nNONE\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "…pruners\r\n\r\n**What type of PR is this?**\r\n\r\n\r\n\r\n/kind cleanup\r\n\r\n**What this PR does / why we need it**:\r\nAllows for more pruners to be added and avoid lint errors such as `cyclomatic complexity` in https://github.com/karmada-io/karmada/pull/4766\r\n\r\n**Which issue(s) this PR fixes**:\r\nFixes #\r\n\r\n**Special notes for your reviewer**:\r\nSee PR https://github.com/karmada-io/karmada/pull/4766 and discussion with @XiShanYongYe-Chang \r\n\r\n**Does this PR introduce a user-facing change?**:\r\n", + "## [Codecov](https://app.codecov.io/gh/karmada-io/karmada/pull/4766?dropdown=coverage&src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) Report\nAttention: Patch coverage is `35.00000%` with `13 lines` in your changes are missing coverage. Please review.\n> Project coverage is 51.79%. 
Comparing base [(`ff7322a`)](https://app.codecov.io/gh/karmada-io/karmada/commit/ff7322acf252632ef2c20864b904a6cd65fa5400?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) to head [(`ae9f508`)](https://app.codecov.io/gh/karmada-io/karmada/pull/4766?dropdown=coverage&src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io).\n\n| [Files](https://app.codecov.io/gh/karmada-io/karmada/pull/4766?dropdown=coverage&src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) | Patch % | Lines |\n|---|---|---|\n| [pkg/resourceinterpreter/default/native/retain.go](https://app.codecov.io/gh/karmada-io/karmada/pull/4766?src=pr&el=tree&filepath=pkg%2Fresourceinterpreter%2Fdefault%2Fnative%2Fretain.go&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io#diff-cGtnL3Jlc291cmNlaW50ZXJwcmV0ZXIvZGVmYXVsdC9uYXRpdmUvcmV0YWluLmdv) | 35.00% | [9 Missing and 4 partials :warning: ](https://app.codecov.io/gh/karmada-io/karmada/pull/4766?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) |\n\n:exclamation: Your organization needs to install the [Codecov GitHub app](https://github.com/apps/codecov/installations/select_target) to enable full functionality.\n\n
Additional details and impacted files", + "| [Flag](https://app.codecov.io/gh/karmada-io/karmada/pull/4766/flags?src=pr&el=flags&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) | Coverage Δ | |\n|---|---|---|\n| [unittests](https://app.codecov.io/gh/karmada-io/karmada/pull/4766/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) | `51.79% <35.00%> (+<0.01%)` | :arrow_up: |\n\nFlags with carried forward coverage won't be shown. [Click here](https://docs.codecov.io/docs/carryforward-flags?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io#carryforward-flags-in-the-pull-request-comment) to find out more.\n\n\n
\n\n[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/karmada-io/karmada/pull/4766?dropdown=coverage&src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io). \n:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io).\n\nYou can add the component name `karmada-controller-manager` to the release-note.\n> The retain logic is modified in the current PR. However, retain is used when resources in member clusters are updated. If resources are created, will the UID and token information in the secret on the control plane be carried to the member cluster?\r\n\r\nKindly ping @a7i\n@XiShanYongYe-Chang Yes it will but we observed in our case that the member cluster kube-controller-manager will fix up the values and get ignored by karmada on update.\r\n\r\nIs there a \"global\" level ignore for CREATE as well?\nMaybe we can do it in the `karmada-webhook`:\r\n\r\nhttps://github.com/karmada-io/karmada/blob/eadf919b6f25dc77873178e88a55983d05b03867/pkg/webhook/work/mutating.go#L61\n@XiShanYongYe-Chang done, I kept as two separate commits for easier review. 
Happy to rebase and squash if needed.\nIt occurs a lint error:" + ] + } + }, + "metadata": { + "tags": [ + "karmada", + "incubating", + "orchestration", + "kind-feature", + "approved", + "size-l", + "lgtm" + ], + "category": "troubleshooting", + "cncfProjects": [ + "karmada" + ], + "targetResourceKinds": [ + "Service", + "Secret" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/karmada-io/karmada/pull/4766", + "sourceRepo": "karmada-io/karmada", + "reactions": 4, + "comments": 22 + }, + "security": { + "scannedAt": "2026-02-27T17:43:56.665Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/karmada/karmada-5116-stateful-failover-proposal.json b/solutions/cncf-generated/karmada/karmada-5116-stateful-failover-proposal.json new file mode 100644 index 00000000..82566617 --- /dev/null +++ b/solutions/cncf-generated/karmada/karmada-5116-stateful-failover-proposal.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:02.830Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "karmada: Stateful Failover Proposal", + "description": "**What type of PR is this?**\nProposal for stateful failover\n\n**What this PR does / why we need it**:\nExplained in the doc\n\n**Which issue(s) this PR fixes**:\nFixes #5006, #4969\n\n**Special notes for your reviewer**:\nN/A\n\n**Does this PR introduce a user-facing change?**:\nNONE", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "**What type of PR is this?**\n/kind feature\n\n**What this PR does / why we need it**:\nAdding failover history information so that applications can keep a record of what failovers happened in the past.\nStateful applications can use this information to detect failures and continue processing from a particular state\n\n**Which issue(s) this PR fixes**:\nFixes #5116 #4969 \n\n**Special notes for 
your reviewer**:\n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\nNONE\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Welcome @Dyex719! It looks like this is your first PR to karmada-io/karmada 🎉\n:warning: Please install the !['codecov app svg image'](https://github.com/codecov/engineering-team/assets/152432831/e90313f4-9d3a-4b63-8b54-cfe14e7ec20d) to ensure uploads and comments are reliably processed by Codecov.\n\n## [Codecov](https://app.codecov.io/gh/karmada-io/karmada/pull/5116?dropdown=coverage&src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) Report\nAll modified and coverable lines are covered by tests :white_check_mark:\n> Project coverage is 40.91%. Comparing base [(`2271a41`)](https://app.codecov.io/gh/karmada-io/karmada/commit/2271a41ab5614a14db15243194394b5de9825e85?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) to head [(`fd35fb4`)](https://app.codecov.io/gh/karmada-io/karmada/commit/fd35fb4edd1780fad4572263b7fb18446c2ad3da?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io).\n> Report is 680 commits behind head on master.\n\n:exclamation: Your organization needs to install the [Codecov GitHub app](https://github.com/apps/codecov/installations/select_target) to enable full functionality.\n\n
Additional details and impacted files", + "| [Flag](https://app.codecov.io/gh/karmada-io/karmada/pull/5116/flags?src=pr&el=flags&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) | Coverage Δ | |\n|---|---|---|\n| [unittests](https://app.codecov.io/gh/karmada-io/karmada/pull/5116/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) | `40.91% <ø> (+12.69%)` | :arrow_up: |\n\nFlags with carried forward coverage won't be shown. [Click here](https://docs.codecov.io/docs/carryforward-flags?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io#carryforward-flags-in-the-pull-request-comment) to find out more.\n\n
\n\n[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/karmada-io/karmada/pull/5116?dropdown=coverage&src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io). \n:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io).\n\nThanks @RainbowMango! One assumption that we wanted to discuss about (I wrote this in the proposal too):", + "Do you think this is a valid assumption? This narrows our scope a little, but defines the problem more clearly.\n> Do you think this is a valid assumption? This narrows our scope a little, but defines the problem more clearly.\r\n\r\nI 100% agree. \r\n1. Partially replica migration is not technically failover, but more like elastic scaling of replicas. \r\n2. The conditions under which failover triggers are fully configurable, if people tolerate the failure of part of replicas, then migration should not be triggered, if they don't, the application should be rebuilt, by leveraging failover.\n[APPROVALNOTIFIER] This PR is **NOT APPROVED**\n\nThis pull-request has been approved by:\n**Once this PR has been reviewed and has the lgtm label**, please ask for approval from [rainbowmango](https://github.com/rainbowmango). For more information see [the Kubernetes Code Review Process](https://git.k8s.io/community/contributors/guide/owners.md#the-code-review-process).\n\nThe full list of commands accepted by this bot can be found [here](https://go.k8s.io/bot-commands?repo=karmada-io%2Fkarmada).\n\n
\nNeeds approval from an approver in each of these files:\n\n- **[OWNERS](https://github.com/karmada-io/karmada/blob/master/OWNERS)**\n\nApprovers can indicate their approval by writing `/approve` in a comment\nApprovers can cancel approval by writing `/approve cancel` in a comment\n
\n\nHi @Dyex719, @mszacillo, I've been thinking this feature recently and came up with some ideas, this feature consists of 3 parts:\r\n\r\nThe first part is how to declare which state data(fields) should be preserved during failover, and I post my idea at https://github.com/karmada-io/karmada/pull/5116#discussion_r1685365859. Please take a look.\r\n\r\nThe second part is how to store the preserved state data, one approach is to store them in the history items(this is our first idea), the challenge things is that it's hard to maintain the history item, especially figuring out what is the destination cluster. \r\n\r\nI think it is worth thinking about storing them in the [GracefulEvictionTask](https://github.com/karmada-io/karmada/blob/f2b37a43a3e7d85277b6481eea876b13b4cb8a88/pkg/apis/work/v1alpha2/binding_types.go#L225-L277), during the failover process, just before creating the eviction task, it makes sense to grab some state(fields) or snapshot of scheduled cluster list. With this grabbed data, the controller would get to know which cluster is the destination by comparing the snapshot and the newly scheduled cluster. \r\n\r\n[edit] \r\nI don't mean I don't like the first approach, just raise another idea. Maybe we also can add a snapshot of the scheduled cluster to the history item. The most challenging thing for this part is distinguishing which field should be managed by which component(scheduler, or controller), and they should be decoupled. \r\n\r\nThe third part is how to feed(or inject) the preserved state data to the destination cluster. This is going to be not that complex as long as the controller can figure out the destination cluster and only feed the state data when creating the application. 
\r\n\nHi @RainbowMango,\r\n\r\nTo address your comment:\r\n\r\n> The second part is how to store the preserved state data, one approach is to store them in the history items(this is our first idea), the challenge things is that it's hard to maintain the history item, especially figuring out what is the destination cluster.\r\n\r\nOne thing we were thinking about it is to only store the cluster from which the workload failed over from. This would help keep the logic simple without involving multiple components. The current cluster the workload is scheduled on can always be inferred from the resourcebinding.\r\n\r\nWith this we would achieve:" + ] + } + }, + "metadata": { + "tags": [ + "karmada", + "incubating", + "orchestration", + "size-l" + ], + "category": "troubleshooting", + "cncfProjects": [ + "karmada" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/karmada-io/karmada/pull/5116", + "sourceRepo": "karmada-io/karmada", + "reactions": 3, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:44:02.830Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/karmada/karmada-5118-proposal-support-for-cluster-level-resource-propagation-pause-and-r.json b/solutions/cncf-generated/karmada/karmada-5118-proposal-support-for-cluster-level-resource-propagation-pause-and-r.json new file mode 100644 index 00000000..8a10ee51 --- /dev/null +++ b/solutions/cncf-generated/karmada/karmada-5118-proposal-support-for-cluster-level-resource-propagation-pause-and-r.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:57.761Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "karmada: [Proposal] Support for cluster-level resource propagation pause and resume capabilities", + "description": "**What type of PR is this?**\n\n/kind design\n/kind 
documentation\n\n**What this PR does / why we need it**:\n\n**Which issue(s) this PR fixes**:\nFixes #1567, #4421, #4688\n\n**Special notes for your reviewer**:\n\nOther related issues: #4937 \n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\nNONE\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/cc @CharlesQQ @a7i @chaunceyjiang @whitewindmills \nHi guys, can you help take a look?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "/cc @CharlesQQ @a7i @chaunceyjiang @whitewindmills \r\nHi guys, can you help take a look?\n@XiShanYongYe-Chang: GitHub didn't allow me to request PR reviews from the following users: CharlesQQ.\n\nNote that only [karmada-io members](https://github.com/orgs/karmada-io/people) and repo collaborators can review this PR, and authors cannot review their own PRs.\n\n
\n\nIn response to [this](https://github.com/karmada-io/karmada/pull/5118#issuecomment-2201798571):\n\n>/cc @CharlesQQ @a7i @chaunceyjiang @whitewindmills \r\n>Hi guys, can you help take a look?\n\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes-sigs/prow](https://github.com/kubernetes-sigs/prow/issues/new?title=Prow%20issue:) repository.\n
\n:warning: Please install the !['codecov app svg image'](https://github.com/codecov/engineering-team/assets/152432831/e90313f4-9d3a-4b63-8b54-cfe14e7ec20d) to ensure uploads and comments are reliably processed by Codecov.\n\n## [Codecov](https://app.codecov.io/gh/karmada-io/karmada/pull/5118?dropdown=coverage&src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) Report\nAll modified and coverable lines are covered by tests :white_check_mark:\n> Project coverage is 28.25%. Comparing base [(`a87ec2a`)](https://app.codecov.io/gh/karmada-io/karmada/commit/a87ec2a61abadf9f102440b1f64cf338b2ee5518?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) to head [(`24e56c7`)](https://app.codecov.io/gh/karmada-io/karmada/commit/24e56c7fc908d1cbbfb148516cf46fb5f4a34e88?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io).\n> Report is 1163 commits behind head on master.\n\n:exclamation: Your organization needs to install the [Codecov GitHub app](https://github.com/apps/codecov/installations/select_target) to enable full functionality.\n\n
Additional details and impacted files", + "| [Flag](https://app.codecov.io/gh/karmada-io/karmada/pull/5118/flags?src=pr&el=flags&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) | Coverage Δ | |\n|---|---|---|\n| [unittests](https://app.codecov.io/gh/karmada-io/karmada/pull/5118/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) | `28.25% <ø> (+0.01%)` | :arrow_up: |\n\nFlags with carried forward coverage won't be shown. [Click here](https://docs.codecov.io/docs/carryforward-flags?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io#carryforward-flags-in-the-pull-request-comment) to find out more.\n\n
\n\n[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/karmada-io/karmada/pull/5118?dropdown=coverage&src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io). \n:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io).\n\n
:rocket: New features to boost your workflow: \n\n- :snowflake: [Test Analytics](https://docs.codecov.com/docs/test-analytics): Detect flaky tests, report on failures, and find test suite problems.\n
\n@XiShanYongYe-Chang \r\nit's also releated [issue](https://github.com/karmada-io/karmada/issues/4937)\n/assign\nHi @CharlesQQ @chaunceyjiang @whitewindmills @RainbowMango , it's ready to review again.\n> Hi! Thanks for your proposal!\r\n> \r\n> In Solution One, the `SchedulePause` of `ResourceBinding` is propagated through `PropagationPolicy`. If I create a `PropagationPolicy` with a `PauseStrategy` set to `nil` by default, is it feasible to only update the `SchedulePause` of `ResourceBinding` to true via a webhook when the `karmada detector` creates the `ResourceBinding`? If this is feasible, does \"propagation\" mean that the associated `ResourceBinding/Work` is only updated when the `PropagationPolicy` updates the `PauseStrategy` field?\r\n> \r\n> I also have a suggestion: can this issue be considered as a user story? In brief, it involves pausing the `ResourceBinding` at the time of creation through a webhook, and then using a custom controller to determine when certain `ResourceBindings` can be scheduled.\r\n> \r\n> #4919\r\n\r\n+1\r\nWhat if set rb's schedulePause to true directly by webhook? this case will not get propagation involved, seems it's an independent case and is not like the `work` pause which is inherited from pp.\n> is it feasible to only update the SchedulePause of ResourceBinding to true via a webhook when the karmada detector creates the ResourceBinding?\r\n\r\nFor solution 1, the source of user control is in the PropagationPolicy. If it is modified through webhook, it will be inconsistent with the declaration of PropagationPolicy.\r\n\r\nI think solution 2 might be better for your scenario.\n> In brief, it involves pausing the ResourceBinding at the time of creation through a webhook, and then using a custom controller to determine when certain ResourceBindings can be scheduled.\r\n\r\nI understand that this is a specific implementation, not a user case. In fact, the two actions you point out can be done by controlling the pause of RB. 
Karmada is not aware of when you pause and resume.\r\n\r\nThat's why I pointed out that solution 2 is more suitable for your use case.\nWhen the user sets pause at the pp or cr level, other components such as wenhook may also set pause/resume at the rb or work level. There will be a conflict here. We should explain this limitation: )\nYes, if you need to set it up via a webhook, the user needs to be aware of the restrictions.\nIn fact, what I am concerned about is whether the new work object has been modified by the latest overridepolicy after resumption of propagation. This must be ensured.\n> In fact, what I am concerned about is whether the new work object has been modified by the latest overridepolicy after resumption of propagation. This must be ensured.\r\n\r\nGood question! What you say should be the definitive expected behavior, because when you change the op, work is updated, and when work is resumed, it is updated with the latest spec.\nI would prefer to introduce this functionality in PropagationPolicy as it is more convenient for users and straightforward than introducing another API. 
\r\n\r\nAlso proposing the API:" + ] + } + }, + "metadata": { + "tags": [ + "karmada", + "incubating", + "orchestration", + "approved", + "size-l", + "kind-documentation", + "lgtm", + "kind-design" + ], + "category": "troubleshooting", + "cncfProjects": [ + "karmada" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/karmada-io/karmada/pull/5118", + "sourceRepo": "karmada-io/karmada", + "reactions": 4, + "comments": 25 + }, + "security": { + "scannedAt": "2026-02-27T17:43:57.761Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/karmada/karmada-5615-one-cluster-should-have-only-one-transport-to-reduce-the-number-of-.json b/solutions/cncf-generated/karmada/karmada-5615-one-cluster-should-have-only-one-transport-to-reduce-the-number-of-.json new file mode 100644 index 00000000..5338c24c --- /dev/null +++ b/solutions/cncf-generated/karmada/karmada-5615-one-cluster-should-have-only-one-transport-to-reduce-the-number-of-.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:55.069Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "karmada: one cluster should have only one transport to reduce the number of TCP connections", + "description": "**What type of PR is this?**\n\n**What this PR does / why we need it**:\nIt can solve the problem about too many tcp connection between aggregated apiserver and apiserver of member's cluster\n**Which issue(s) this PR fixes**:\nFixes #5574\n\n**Special notes for your reviewer**:\n\n\"截屏2024-09-27\n\ntest.sh\n````\nKUBECONFIG=\"/root/.kube/karmada.config\"\nSLEEP_INTERVAL=0.1\nMAX_JOBS=100\nfunction run_karmadactl() {\n for ((i = 1; i <= 50; i++)); do\n karmadactl --kubeconfig \"$KUBECONFIG\" get node --operation-scope=members\n sleep \"$SLEEP_INTERVAL\"\n done\n}\nfor ((i = 1; i <= MAX_JOBS; i++)); do\n run_karmadactl 
&\ndone\nwait\n````\n\nresult.sh\n````\n#!/bin/bash\n\nwhile true\ndo\n tcp_count=$(netstat -anp | grep 6443| wc -l)\n sleep 1\n echo \"$(date '+%Y-%m-%d %H:%M:%S') - Current total TCP connections: $((tcp_count))\"\ndone\n````\n\nfix before\n\"企业微信截图_f9386612-1d3f-402c-8ae", @zclyne Thanks~ I have no further comment cc @mohamedawnallah for another look\n\nThanks guys, I will take another look EOD. I have an old windows server and I can test before/after this PR\n\nEDIT:\nUnfortunately I wasn't able to spin up that windows server because of hardware issues", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Not sure why helm tests are failing as I haven't touched those files at all.. Will look into it\n:warning: Please install the !['codecov app svg image'](https://github.com/codecov/engineering-team/assets/152432831/e90313f4-9d3a-4b63-8b54-cfe14e7ec20d) to ensure uploads and comments are reliably processed by Codecov.\n\n## [Codecov](https://app.codecov.io/gh/karmada-io/karmada/pull/6581?dropdown=coverage&src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io) Report\n:white_check_mark: All modified and coverable lines are covered by tests.\n:white_check_mark: Project coverage is 45.35%. 
Comparing base ([`a2c2057`](https://app.codecov.io/gh/karmada-io/karmada/commit/a2c2057761f11c8c9c32d9c7fc446c7a32c80339?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io)) to head ([`696e2a9`](https://app.codecov.io/gh/karmada-io/karmada/commit/696e2a9a75a3c995e5fde67d80d792df7e8d5968?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=karmada-io)).\n:warning: Report is 27 commits behind head on master.\n:exclamation: Your organization needs to install the [Codecov GitHub app](https://github.com/apps/codecov/installations/select_target) to enable full functionality.\n\n
Additional details and impacted files" + ] + } + }, + "metadata": { + "tags": [ + "karmada", + "incubating", + "orchestration", + "kind-bug", + "approved", + "lgtm", + "size-m" + ], + "category": "troubleshooting", + "cncfProjects": [ + "karmada" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/karmada-io/karmada/pull/6581", + "sourceRepo": "karmada-io/karmada", + "reactions": 3, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:44:03.851Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/karmada/karmada-754-add-get-function-to-karmadactl.json b/solutions/cncf-generated/karmada/karmada-754-add-get-function-to-karmadactl.json new file mode 100644 index 00000000..4bc6a741 --- /dev/null +++ b/solutions/cncf-generated/karmada/karmada-754-add-get-function-to-karmadactl.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:58.811Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "karmada: add get function to karmadactl", + "description": "Signed-off-by: QAQ-rookie <781647222@qq.com>\n\n**What type of PR is this?**\n/kind feature \n\n**What this PR does / why we need it**:\nThis PR provides the karmadactl get command, get multi-cluster information via karmadactl.\n\n**Which issue(s) this PR fixes**:\nFixes #666\n\n**Special notes for your reviewer**:\n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\nNONE\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Hi @QAQ-rookie Could you please add PR descriptions about how to use it and your test result?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Welcome @QAQ-rookie! 
It looks like this is your first PR to karmada-io/karmada 🎉\nHi @QAQ-rookie Could you please add PR descriptions about how to use it and your test result?\nUse the karmadactl get command in the following way and test result: \r\neg:", + "Too many changed files in one commit. Could you please separate the files into 2 commits? One is for vendor or generated files, the other is your core change.\n> Too much changed file in one commit. Could you please separate the files into 2 commits? One is for vendor or generated files, the other is your core change.\r\n\r\nI have split the commit twice. Thanks.\nBy the way, please fix the DCO. @QAQ-rookie \r\nhttps://github.com/karmada-io/karmada/pull/754/checks?check_run_id=3684671924\n> By the way, please fix the DCO. @QAQ-rookie\r\n> https://github.com/karmada-io/karmada/pull/754/checks?check_run_id=3684671924\r\n\r\nfixed\n@QAQ-rookie Please tidy your commit.\n@XiShanYongYe-Chang Please help review this PR, the helper functions used by controllers might be reusable by command.\n> @XiShanYongYe-Chang Please help review this PR, the helper functions used by controllers might be reusable by command.\r\n\r\nOk.\n/lgtm\r\n/approve\nHi @QAQ-rookie \r\n\r\nYou are using different author name and author emails in the two commits." 
+ ] + } + }, + "metadata": { + "tags": [ + "karmada", + "incubating", + "orchestration", + "kind-feature", + "approved", + "size-xxl", + "lgtm" + ], + "category": "troubleshooting", + "cncfProjects": [ + "karmada" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/karmada-io/karmada/pull/754", + "sourceRepo": "karmada-io/karmada", + "reactions": 3, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:43:58.811Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/karmada/karmada-756-add-finalizer-in-cluster-resourcebinding-to-delete-works.json b/solutions/cncf-generated/karmada/karmada-756-add-finalizer-in-cluster-resourcebinding-to-delete-works.json new file mode 100644 index 00000000..23cbf7e3 --- /dev/null +++ b/solutions/cncf-generated/karmada/karmada-756-add-finalizer-in-cluster-resourcebinding-to-delete-works.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:06.815Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "karmada: Add finalizer in (Cluster)ResourceBinding to delete works", + "description": "**What type of PR is this?**\n/kind bug\n\n**What this PR does / why we need it**:\nAdd finalizer in ResourceBinding and ClusterResourceBinding to delete works.\n\nIf we delete workload while karmada-controller-manager crashes, this will make sure **(Cluster)ResourceBinding remains until controller-manager is ready, then it handles the delete event to delete related works**.\n\n**Which issue(s) this PR fixes**:\nFixes #708\n\n**Special notes for your reviewer**:\nI followed [this](https://github.com/karmada-io/karmada/issues/708#issue-990675467) and verified it works in my local environment.\n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\nNONE\n```", + "type": "troubleshoot", + "status": "completed", + 
"resolution": { + "summary": "This patch is reasonable to me. Would like to take a look?\n@mrlihanbo @Garrybest", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "karmada", + "incubating", + "orchestration", + "kind-bug", + "approved", + "lgtm", + "size-m" + ], + "category": "troubleshooting", + "cncfProjects": [ + "karmada" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/karmada-io/karmada/pull/756", + "sourceRepo": "karmada-io/karmada", + "reactions": 2, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:44:06.815Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kcp/kcp-1601-add-owners-files-and-notes-on-review-approver-role-to-contributing-md.json b/solutions/cncf-generated/kcp/kcp-1601-add-owners-files-and-notes-on-review-approver-role-to-contributing-md.json new file mode 100644 index 00000000..92509749 --- /dev/null +++ b/solutions/cncf-generated/kcp/kcp-1601-add-owners-files-and-notes-on-review-approver-role-to-contributing-md.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:31.808Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kcp: Add owners files and notes on review/approver role to CONTRIBUTING.md", + "description": "Fixes #1557 \n\nFigured this might be easier with something to actually apply red pen to. Note, this is not a judgement of any contributor's capabilities. I simply looked at history and picked what I thought looked like names that were frequently contributing to areas. 
Comment and change suggestions encouraged.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "- [x] @ncdc\n- [x] @sttts\n- [x] @davidfestal\n- @stevekuznetsov\n- [x] @jmprusi\n- @s-urbaniak\n- [x] @davidfestal\n- [x] @shawn-hurley (will update based on feedback)\n- [x] @qiujian16\n- @csams\n- @kylape\n- [x] @p0lyn0mial\n\nplease 👍 here to indicate you're good with what is shown in this PR with regards to the code areas and responsibilities. Thank you!", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kcp", + "sandbox", + "app-definition", + "approved", + "lgtm" + ], + "category": "workloads", + "cncfProjects": [ + "kcp" + ], + "targetResourceKinds": [ + "Role" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kcp-dev/kcp/pull/1601", + "sourceRepo": "kcp-dev/kcp", + "reactions": 2, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:48:31.808Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kcp/kcp-1708-support-for-local-cluster-services-dns-resolution.json b/solutions/cncf-generated/kcp/kcp-1708-support-for-local-cluster-services-dns-resolution.json new file mode 100644 index 00000000..7ef97c3a --- /dev/null +++ b/solutions/cncf-generated/kcp/kcp-1708-support-for-local-cluster-services-dns-resolution.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:30.845Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kcp: ✨ Support for local cluster services DNS resolution", + "description": "## Summary\n\nConfigure synced deployments DNS Config to point to kcp DNS resolver mapping local namespaces to physical namespaces for cluster local services.\n \n## Related issue(s)\n\nFixes #505 \nFixes #1465", + "type": "troubleshoot", + 
"status": "completed", + "resolution": { + "summary": "@lionelvillard, thanks for the PR! \n\nA couple of comments:\n\nI see that the PR doesn't take into account the workspace of the Pod that is making the request, so that could lead to conflicts in the namespace resolution, given the following scenario:\n\n1. Pod A running in Namespace A belongs to workspace A\n1. Service B in Namespace B belongs to workspace B\n1. Service B in Namespace B belongs to workspace A\n\nif POD A tries to resolve the service B in namespace B. Currently, the plugin can respond with a service from another Workspace (B)\n\nTo solve this, the plugin has to be aware of the Workspace of the querying POD and filter accordingly.\n\nAlso, wiring up the Syncer PODs IP to the dnsConfig means that if the syncer pod gets recreated (update, scale...), all the currently deployed pods/deployments will need to be resynced. Perhaps a service that fronts the syncer deployment with a cluster IP could be a better solution?\n\nThank you!", + "steps": [ + "Pod A running in Namespace A belongs to workspace A", + "Service B in Namespace B belongs to workspace B", + "Service B in Namespace B belongs to workspace A" + ] + } + }, + "metadata": { + "tags": [ + "kcp", + "sandbox", + "app-definition", + "approved", + "lgtm", + "ok-to-test" + ], + "category": "workloads", + "cncfProjects": [ + "kcp" + ], + "targetResourceKinds": [ + "Deployment", + "Service", + "Namespace" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kcp-dev/kcp/pull/1708", + "sourceRepo": "kcp-dev/kcp", + "reactions": 3, + "comments": 30 + }, + "security": { + "scannedAt": "2026-02-27T17:48:30.845Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kcp/kcp-2819-tunnel-validate-namespace-pod-at-the-syncer-side.json b/solutions/cncf-generated/kcp/kcp-2819-tunnel-validate-namespace-pod-at-the-syncer-side.json new file mode 100644 index 
00000000..98ce426d --- /dev/null +++ b/solutions/cncf-generated/kcp/kcp-2819-tunnel-validate-namespace-pod-at-the-syncer-side.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:35.030Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kcp: 🌱 Tunnel: Validate namespace/pod at the syncer side", + "description": "## Summary\n\nThis PR adds an extra layer of security at the syncer side of the tunneler to ensure that the proxied requests are against a namespace owned by the syncer and a pod in upsync state. \n\n## Related issue(s)\n\nFixes https://github.com/kcp-dev/kcp/issues/1975", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/retest", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kcp", + "sandbox", + "app-definition", + "approved", + "lgtm" + ], + "category": "workloads", + "cncfProjects": [ + "kcp" + ], + "targetResourceKinds": [ + "Pod", + "Namespace" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kcp-dev/kcp/pull/2819", + "sourceRepo": "kcp-dev/kcp", + "reactions": 1, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:48:35.030Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kcp/kcp-2945-update-to-kubernetes-1-26-3.json b/solutions/cncf-generated/kcp/kcp-2945-update-to-kubernetes-1-26-3.json new file mode 100644 index 00000000..f7ff332c --- /dev/null +++ b/solutions/cncf-generated/kcp/kcp-2945-update-to-kubernetes-1-26-3.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:34.046Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kcp: ✨ Update to Kubernetes 1.26.3", + "description": "## Summary\nUpdate to Kubernetes 
1.26.3.\n\n## Todos\n- [x] Unit tests pass\n- [x] Boilerplate\n- [x] Lint\n- [ ] Update go.mod to real Kube branch\n- [x] Enable ValidatingAdmissionPolicy\n\n## Related issue(s)\n\nFixes #2772", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/test all", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "labelclusterrolebinding_controller.go 23995 202512 -157,626\r\nlogicalcluster_resource_deletor.go 741 2533 -109,469\r\nworkspace_reconcile_deletion.go 327 1023 -103,111\r\ngraph_builder.go 80571 32612 84,7459\r\nworkspace_reconcile_phase.go 3724 1785 70,3939\r\nresource_quota_monitor.go 100594 53081 61,8357\r\ninformer.go 22115 14343 42,6354\r\ngarbagecollector_patch.go 21653 14898 36,9621\r\nresource_quota_controller_patch.go 10893 7532 36,483\r\nshared_informer.go 34637 24698 33,5013\r\nresource_quota_controller.go 1923 1395 31,8264\r\napiexport_controller.go 1274 930 31,216\r\nlogicalcluster_deletion_controller.go 5573 7525 -29,8061\r\napibinding_controller.go 40111 29892 29,1959\r\nreflector.go 1315 987 28,497\r\nlabelclusterrolebinding_reconcile.go 3501 2639 28,0782\r\nstatusless_committer.go 3750 2836 27,7558\r\nworkspace_controller.go 11578 8770 27,5998\r\nbootstrap_reconcile.go 888 681 26,3862\r\nreplication_reconcile.go 5831 4645 22,6422\r\nbootstrap.go 1088 872 22,0408\r\nlabellogicalcluster_reconcile.go 277 224 21,1577\r\napiexport_apireconciler_controller.go 1856 1516 20,1661\r\napiexport_apireconciler_reconcile.go 715 590 19,1571\r\napibindingannotation_controller.go 26255 22282 16,371\r\ncommitter.go 23959 20925 13,5193\r\napibinding_deletion_controller.go 10983 9596 13,4798\r\ndefaultlocation_controller.go 556 487 13,2311\r\nsynctargetexports_controller.go 807 707 13,21\r\nheartbeat_controller.go 875 769 12,8954\r\nsynctarget_controller.go 1041 928 11,4779\r\nworkspacetype_controller.go 239 217 9,64912\r\nlabelclusterrole_controller.go 
110452 100791 9,14681\r\nsyncer_apireconciler_controller.go 656 599 9,08367\r\npermissionclaimlabel_resource_reconcile.go 4294 3926 8,95377\r\napibinder_initializer_reconcile.go 6943 6392 8,26397\r\nsyncer_apireconciler_reconcile.go 6945 6424 7,79415\r\nplacement_controller.go 2078 1932 7,2818\r\ncontroller.go 11017 10291 6,81434\r\ncacher.go 892 834 6,72074\r\ndecorator.go 2487 2327 6,64728\r\npublisher.go 1891 1782 5,9352\r\napibinder_initializer_controller.go 10361 9773 5,84087\r\ngarbagecollector.go 1258 1190 5,55556\r\nlabellogicalcluster_controller.go 34548 36446 -5,34693\r\nworkspace_reconcile_scheduling.go 1833 1739 5,26316" + ] + } + }, + "metadata": { + "tags": [ + "kcp", + "sandbox", + "app-definition", + "needs-rebase", + "kind-api-change" + ], + "category": "workloads", + "cncfProjects": [ + "kcp" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kcp-dev/kcp/pull/2945", + "sourceRepo": "kcp-dev/kcp", + "reactions": 1, + "comments": 22 + }, + "security": { + "scannedAt": "2026-02-27T17:48:34.046Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kcp/kcp-3140-sparkles-update-to-kubernetes-1-30.json b/solutions/cncf-generated/kcp/kcp-3140-sparkles-update-to-kubernetes-1-30.json new file mode 100644 index 00000000..d4c5a5ec --- /dev/null +++ b/solutions/cncf-generated/kcp/kcp-3140-sparkles-update-to-kubernetes-1-30.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:32.931Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kcp: :sparkles: Update to Kubernetes 1.30", + "description": "## Summary\n\nThis updates kcp to Kubernetes 1.30.0. The corresponding branch for our Kubernetes fork is [kcp-1.30](https://github.com/kcp-dev/kubernetes/tree/kcp-1.30). 
Mainly this PR adds new APIs or removes feature gates where they have been removed by upstream.\n\nThe bigger changes in this PR are:\n\n- Updating to the new ValidatingAdmissionPolicy plugin infrastructure.\n- Rewrite the OpenAPI controller we recently added to accommodate for new upstream types.\n\nThe following bigger TODOs exist, but I would like to tackle them separately:\n\n- Look at enabling the flags I've disabled in this PR, most notably `--authentication-config`, which seems like a hugely useful feature for a multi-tenant API server like KCP.\n\n## Related issue(s)\n\nFixes #3068\n\n## Release Notes\n\n```release-note\nUpdate to Kubernetes 1.30\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/retest", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "🥳 🔥 🎆 \n/retest\n/lgtm\r\n/approve\r\n/hold\r\n@sttts wanna read through or fix on master? :) \nLGTM label has been added.
Git tree hash: 2c7ea837cae12d637bf20e30cd8bef62fb6bd5bc
\n[APPROVALNOTIFIER] This PR is **APPROVED**\n\nThis pull-request has been approved by: *mjudeikis*\n\nThe full list of commands accepted by this bot can be found [here](https://go.k8s.io/bot-commands?repo=kcp-dev%2Fkcp).\n\nThe pull request process is described [here](https://git.k8s.io/community/contributors/guide/owners.md#the-code-review-process)\n\n
\nNeeds approval from an approver in each of these files:\n\n- ~~[OWNERS](https://github.com/kcp-dev/kcp/blob/main/OWNERS)~~ [mjudeikis]\n- ~~[config/crds/OWNERS](https://github.com/kcp-dev/kcp/blob/main/config/crds/OWNERS)~~ [mjudeikis]\n- ~~[pkg/admission/OWNERS](https://github.com/kcp-dev/kcp/blob/main/pkg/admission/OWNERS)~~ [mjudeikis]\n\nApprovers can indicate their approval by writing `/approve` in a comment\nApprovers can cancel approval by writing `/approve cancel` in a comment\n
\n\nModulo the e2e style comments, lgtm.\nLooks like the failing tests aren't a flake:" + ] + } + }, + "metadata": { + "tags": [ + "kcp", + "sandbox", + "app-definition", + "kind-feature", + "approved", + "lgtm", + "kind-api-change", + "size-xxl", + "release-note", + "dco-signoff--yes" + ], + "category": "workloads", + "cncfProjects": [ + "kcp" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/kcp-dev/kcp/pull/3140", + "sourceRepo": "kcp-dev/kcp", + "reactions": 2, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:48:32.931Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keda/keda-1591-microsoft-sql-server-mssql-scaler-implementation.json b/solutions/cncf-generated/keda/keda-1591-microsoft-sql-server-mssql-scaler-implementation.json new file mode 100644 index 00000000..802921a5 --- /dev/null +++ b/solutions/cncf-generated/keda/keda-1591-microsoft-sql-server-mssql-scaler-implementation.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:36.453Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keda: Microsoft SQL Server (MSSQL) scaler implementation", + "description": "This PR adds a built-in scaler that uses a Microsoft SQL Server (MSSQL) database as the event source. It's very similar to both the existing mysql and postgresql scalers and is intended to work with both self-hosted SQL Server databases and SQL Server databases hosted in managed clouds.\n\nDocumentation PR: https://github.com/kedacore/keda-docs/pull/367\n\nThere is still more work to do on this PR, which is tracked in the checklist below. This also includes end-to-end testing. However, I wanted to open it early to hopefully get some early feedback since this is both my first contribution to KEDA and my first time writing code in Go (besides hello world). 
Feedback is greatly appreciated. 🙏🏽\n\n### Checklist\n\n- [x] Commits are signed with Developer Certificate of Origin (DCO - [learn more](https://github.com/kedacore/keda/blob/main/CONTRIBUTING.md#developer-certificate-of-origin-signing-your-work))\n- [x] Unit tests have been added\n- [x] End-to-end tests have been added\n- [x] ~A PR is opened ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Documentation for a new MSSQL scaler implementation.\n\nRelated scaler PR: https://github.com/kedacore/keda/pull/1591\nRelated backlog item: https://github.com/kedacore/keda/issues/674", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "keda", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "keda" + ], + "targetResourceKinds": [ + "Deployment" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kedacore/keda/pull/1591", + "sourceRepo": "kedacore/keda", + "reactions": 4, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:43:36.453Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keda/keda-2309-add-support-for-the-value-metric-type-in-all-scalers.json b/solutions/cncf-generated/keda/keda-2309-add-support-for-the-value-metric-type-in-all-scalers.json new file mode 100644 index 00000000..3abcbfc7 --- /dev/null +++ b/solutions/cncf-generated/keda/keda-2309-add-support-for-the-value-metric-type-in-all-scalers.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:39.463Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keda: Add support for the \"Value\" metric type in all scalers", + "description": "Add support for `ValueMetricType` in all scalers except cpu/memory by introducing a new optional `metricType` 
field to the ScaleTrigger spec. The field will default to `AverageValueMetricType` to maintain backward compatibility.\n\nNotes:\n- The `Utilization` metric type is not supported for external metrics, only `Value` or `AverageValue`.\n- The CPU/memory scalers will continue to support the `Utilization` and `AverageValue` metric types.\n- The fallback capability will continue to be supported for the `AverageValue` metric type only.\n\n### Checklist\n\n- [x] Commits are signed with Developer Certificate of Origin (DCO - [learn more](https://github.com/kedacore/keda/blob/main/CONTRIBUTING.md#developer-certificate-of-origin-signing-your-work))\n- [x] Tests have been added\n- [x] A PR is opened to update the documentation on ([repo](https://github.com/kedacore/keda-docs)) *(if applicable)*: https://github.com/kedacore/keda-docs/pull/657\n- [x] Changelog has been updated\n\nFixes #2030.\n\nSigned-off-", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/run-e2e\n**Update:** You can check the progres [here](https://github.com/kedacore/keda/actions/runs/1501576197)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "scalers/rabbitmq-queue-http.test.ts\r\nscalers/rabbitmq-queue-http-regex-vhost.test.ts\r\nscalers/rabbitmq-queue-http-regex.test.ts", + "***.648543265835394***e+09\tERROR\tcontroller.scaledobject\tFailed to create new HPA in cluster\t***\"reconciler group\": \"keda.sh\", \"reconciler kind\": \"ScaledObject\", \"name\": \"test-scaledobject\", \"namespace\": \"rabbitmq-queue-http-regex-vhost-test\", \"HPA.Namespace\": \"rabbitmq-queue-http-regex-vhost-test\", \"HPA.Name\": \"keda-hpa-test-scaledobject\", \"error\": \"HorizontalPodAutoscaler.autoscaling \\\"keda-hpa-test-scaledobject\\\" is invalid: [spec.metrics[0].external.target.type: Required value: must specify a metric target type, spec.metrics[0].external.target.type: Invalid value: \\\"\\\": 
must be either Utilization, Value, or AverageValue]\"***\r\ngithub.com/kedacore/keda/v2/controllers/keda.(*ScaledObjectReconciler).ensureHPAForScaledObjectExists\r\n\t/workspace/controllers/keda/scaledobject_controller.go:36***\r\ngithub.com/kedacore/keda/v2/controllers/keda.(*ScaledObjectReconciler).reconcileScaledObject\r\n\t/workspace/controllers/keda/scaledobject_controller.go:229\r\ngithub.com/kedacore/keda/v2/controllers/keda.(*ScaledObjectReconciler).Reconcile\r\n\t/workspace/controllers/keda/scaledobject_controller.go:***80\r\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Reconcile\r\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.***.0/pkg/internal/controller/controller.go:***4\r\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\r\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.***.0/pkg/internal/controller/controller.go:3***\r\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\r\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.***.0/pkg/internal/controller/controller.go:266\r\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\r\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.***.0/pkg/internal/controller/controller.go:227\r\n***.648543265835483e+09\tERROR\tcontroller.scaledobject\tFailed to ensure HPA is correctly created for ScaledObject\t***\"reconciler group\": \"keda.sh\", \"reconciler kind\": \"ScaledObject\", \"name\": \"test-scaledobject\", \"namespace\": \"rabbitmq-queue-http-regex-vhost-test\", \"error\": \"HorizontalPodAutoscaler.autoscaling \\\"keda-hpa-test-scaledobject\\\" is invalid: [spec.metrics[0].external.target.type: Required value: must specify a metric target type, spec.metrics[0].external.target.type: Invalid value: \\\"\\\": must be either Utilization, Value, or AverageValue]\"***" + ] + } + }, + "metadata": { + "tags": [ + "keda", + "graduated", + "app-definition" + ], + "category": "workloads", + 
"cncfProjects": [ + "keda" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/kedacore/keda/pull/2309", + "sourceRepo": "kedacore/keda", + "reactions": 4, + "comments": 36 + }, + "security": { + "scannedAt": "2026-02-27T17:43:39.463Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keda/keda-2418-add-predictkube-scaler.json b/solutions/cncf-generated/keda/keda-2418-add-predictkube-scaler.json new file mode 100644 index 00000000..27fcba8f --- /dev/null +++ b/solutions/cncf-generated/keda/keda-2418-add-predictkube-scaler.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:31.415Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keda: Add PredictKube scaler", + "description": "This PR adds a new PredictKube scaler, ready-to-use for the community.\nPredictKube - is a tool for proactive scaling based on the AI model’s prediction. 
\n\nRelated docs PR: [https://github.com/kedacore/keda-docs/pull/617](https://github.com/kedacore/keda-docs/pull/617)\n\nThis is an example of what the TriggerAuthentication and the ScaledObject definitions would look like:\n```yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: predictkube-secrets\ntype: Opaque\ndata:\n apiKey: # Required: base64 encoded value of PredictKube apiKey\n---\napiVersion: keda.sh/v1alpha1\nkind: TriggerAuthentication\nmetadata:\n name: keda-trigger-auth-predictkube-secret\nspec:\n secretTargetRef:\n - parameter: apiKey\n name: predictkube-secrets\n key: apiKey\n---\napiVersion: keda.sh/v1alpha1\nkind: ScaledObject\nmetadata:\n name: example-app-scaler\nspec:\n scaleTargetRef:\n name: example-app\n pollingInterval: 60\n cooldownPeriod: 300\n minReplicaCount: 3\n maxReplicaCount: 50\n triggers:\n - type: predictkube\n ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This PR adds documentation for a new PredictKube Scaler.\n\n### Checklist\n\n- [x] Commits are signed with Developer Certificate of Origin (DCO)\n\nRelates to https://github.com/kedacore/keda/pull/2418 & https://github.com/kedacore/keda/issues/2458", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "apiVersion: v1\r\nkind: Secret\r\nmetadata:\r\n name: predictkube-secrets\r\ntype: Opaque\r\ndata:\r\n apiKey: # Required: base64 encoded value of PredictKube apiKey\r\n---\r\napiVersion: keda.sh/v1alpha1\r\nkind: TriggerAuthentication\r\nmetadata:\r\n name: keda-trigger-auth-predictkube-secret\r\nspec:\r\n secretTargetRef:\r\n - parameter: apiKey\r\n name: predictkube-secrets\r\n key: apiKey\r\n---\r\napiVersion: keda.sh/v1alpha1\r\nkind: ScaledObject\r\nmetadata:\r\n name: example-app-scaler\r\nspec:\r\n scaleTargetRef:\r\n name: example-app\r\n pollingInterval: 60\r\n cooldownPeriod: 300\r\n minReplicaCount: 3\r\n maxReplicaCount: 50\r\n triggers:\r\n - type: 
predictkube\r\n metadata:\r\n predictHorizon: \"2h\"\r\n historyTimeWindow: \"7d\" # We recomend to use minimum 7-14 days time window as historical data\r\n prometheusAddress: http://kube-prometheus-stack-prometheus.monitoring:9090\r\n metricName: http_requests_total # Note: name to identify the metric, generated value would be `predictkube-http_requests_total`\r\n query: sum(irate(http_requests_total{pod=~\"example-app-.*\"}[2m]))\r\n queryStep: \"2m\" # Note: query step duration for range prometheus queries\r\n threshold: '2000' # Value to start scaling for\r\n authenticationRef:\r\n name: keda-trigger-auth-predictkube-secret" + ] + } + }, + "metadata": { + "tags": [ + "keda", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "keda" + ], + "targetResourceKinds": [ + "Pod", + "Deployment", + "Secret" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kedacore/keda/pull/2418", + "sourceRepo": "kedacore/keda", + "reactions": 11, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:43:31.416Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keda/keda-3021-add-apache-pulsar-scaler.json b/solutions/cncf-generated/keda/keda-3021-add-apache-pulsar-scaler.json new file mode 100644 index 00000000..1b7f3d77 --- /dev/null +++ b/solutions/cncf-generated/keda/keda-3021-add-apache-pulsar-scaler.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:45.265Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keda: Add Apache Pulsar Scaler", + "description": "_Provide a description of what has been changed_\n\n### Checklist\n\n- [x] Commits are signed with Developer Certificate of Origin (DCO - [learn more](https://github.com/kedacore/keda/blob/main/CONTRIBUTING.md#developer-certificate-of-origin-signing-your-work))\n- [x] Tests have been 
added\n- [ ] A PR is opened to update our Helm chart ([repo](https://github.com/kedacore/charts)) *(if applicable, ie. when deployment manifests are modified)*\n- [x] A PR is opened to update the documentation on ([repo](https://github.com/kedacore/keda-docs)) *(if applicable)*\n- [ ] Changelog has been updated and is aligned with our [changelog requirements](https://github.com/kedacore/keda/blob/main/CONTRIBUTING.md#Changelog)\n\nFixes #853 \n\nThis PR is base on #1666\n\ndoc PR --> https://github.com/kedacore/keda-docs/pull/761", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "> Whant can i do for it and how can i check it myself, thanks!\n\nhey @liangyuanpeng ,\nThanks a ton for this new scaler! ❤️ ❤️ \nThat specific CI only passes if e2e tests have passed. (it's a new mechanism that we have introduced to block PRs without e2e test). It's not mandatory because we can add the label manually if the PR doesn't need them.\nIn this case, talking about new scaler, it'd be nice if you can [add some e2e tests](https://github.com/kedacore/keda/tree/main/tests/scalers).\nBTW, [there is other CI failing](https://github.com/kedacore/keda/runs/6439328615?check_suite_focus=true) that requires changes in the code. Basically you should sort alphabetically the scalers in `scale_handler.go` and some code fixes", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Error: Label check failed: required any of ok-to-merge, but found 0." 
+ ] + } + }, + "metadata": { + "tags": [ + "keda", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "keda" + ], + "targetResourceKinds": [ + "Deployment" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kedacore/keda/pull/3021", + "sourceRepo": "kedacore/keda", + "reactions": 3, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:43:45.265Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keda/keda-3828-feat-provide-support-for-explicitly-pausing-autoscaling-of-scaledjobs.json b/solutions/cncf-generated/keda/keda-3828-feat-provide-support-for-explicitly-pausing-autoscaling-of-scaledjobs.json new file mode 100644 index 00000000..37f4229b --- /dev/null +++ b/solutions/cncf-generated/keda/keda-3828-feat-provide-support-for-explicitly-pausing-autoscaling-of-scaledjobs.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:35.043Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keda: feat: Provide support for explicitly pausing autoscaling of ScaledJobs", + "description": "Introduce paused annotation for scaledJobs \n### Checklist\n\n- [x] When introducing a new scaler, I agree with the [scaling governance policy](https://github.com/kedacore/governance/blob/main/SCALERS.md)\n- [x] Commits are signed with Developer Certificate of Origin (DCO - [learn more](https://github.com/kedacore/keda/blob/main/CONTRIBUTING.md#developer-certificate-of-origin-signing-your-work))\n- [x] Tests have been added\n- [x] A PR is opened to update our Helm chart ([repo](https://github.com/kedacore/charts)) *(if applicable, ie. 
when deployment manifests are modified)*\n- [x] A PR is opened to update the documentation on ([repo](https://github.com/kedacore/keda-docs)) *(if applicable)*\n- [x] Changelog has been updated and is aligned with our [changelog requirements](https://github.com/kedacore/keda/blob/main/CONTRIBUTING.md#Changelog)\n\nFixes #3303", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "--- Accreditation ---\n\nOriginal PR: https://github.com/kedacore/keda/pull/3828\n\nBorrows from the work originally implemented by: https://github.com/keegancwinchester\n\nNote: I would have loved to have pulled the commit from the original branch, but I could not be able to.\n\nDocumentation MR by original implementor: https://github.com/kedacore/keda-docs/pull/932\n\n--- Fixes ---\n\nFixes https://github.com/kedacore/keda/issues/3303\n\n--- PR Notes ---\n\nIntroduce annotation to pause ScaledJobs.\n\n### Checklist\n\n- [x] When introducing a new scaler, I agree with the [scaling governance policy](https://github.com/kedacore/governance/blob/main/SCALERS.md)\n- [x] I have verified that my change is according to the [deprecations & breaking changes policy](https://github.com/kedacore/governance/blob/main/DEPRECATIONS.md)\n- [x] Tests have been added\n- [x] Changelog has been updated and is aligned with our [changelog requirements](https://github.com/kedacore/keda/blob/main/CONTRIBUTING.md#Changelog)\n- [x] A PR is opened to update our Helm chart ([repo](https://github.com/kedacore/charts)) *(if applicable, ie. 
when deployment manifests are modified)*\n- [x] A PR is opened to update the documentation on ([repo](https://github.com/kedacore/keda-docs)) *(if applicable)*\n- [x] Commits are signed with Developer Certificate of Origin (DCO - [learn more](https://github.com/kedacore/keda/blob/main/CONTRIBUTING.md#developer-certificate-of-origin-signing-your-work))", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "keda", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "keda" + ], + "targetResourceKinds": [ + "Deployment", + "Job" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kedacore/keda/pull/3828", + "sourceRepo": "kedacore/keda", + "reactions": 4, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:43:35.043Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keda/keda-4821-feat-add-support-for-formula-based-evaluation-of-metrics-values.json b/solutions/cncf-generated/keda/keda-4821-feat-add-support-for-formula-based-evaluation-of-metrics-values.json new file mode 100644 index 00000000..0fd63b60 --- /dev/null +++ b/solutions/cncf-generated/keda/keda-4821-feat-add-support-for-formula-based-evaluation-of-metrics-values.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:38.144Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keda: feat: Add support for formula based evaluation of metrics values", + "description": "So I have created ([this big PR](https://github.com/kedacore/keda/pull/4583)) but after some comments from others decided to separate it into 2 smaller PRs. So I extracted the functionality of formula and kept it here.\n\n## **WHATS NEW**\n\nThis change introduces a new feature I call \"formula\". 
The definition lies in SO (see below).\n```yaml\n// scaledObject.yaml definition\nspec:\n advanced:\n scalingModifiers:\n target: \"2\"\n formula: \"(mapi_trig + kw_trig)/2\"\n...\n triggers:\n - type: kubernetes-workload\n name: kw_trig\n metadata:\n podSelector: 'pod=workload-test'\n value: '1'\n - type: metrics-api\n name: mapi_trig\n metadata:\n targetValue: \"2\"\n url: \"https://mockbin.org/bin/336a8d99-9e09-4f1f-979d-851a6d1b1423\"\n valueLocation: \"tasks\"\n````\n\n- When formula is defined, it creates a new `composite-scaler-metric` that is passed on to HPA (`hpa.go`). HPA will make a request for this `composite-metric` ONLY instead of all external metrics. (resource ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "provide documentation for new feature, which is defined in SO.\nhere -> [new feature implementation](https://github.com/kedacore/keda/pull/4821)\n### Checklist\n\n- [x] Commits are signed with Developer Certificate of Origin (DCO)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "// scaledObject.yaml definition\r\nspec:\r\n advanced:\r\n scalingModifiers:\r\n target: \"2\"\r\n formula: \"(mapi_trig + kw_trig)/2\"\r\n...\r\n triggers:\r\n - type: kubernetes-workload\r\n name: kw_trig\r\n metadata:\r\n podSelector: 'pod=workload-test'\r\n value: '1'\r\n - type: metrics-api\r\n name: mapi_trig\r\n metadata:\r\n targetValue: \"2\"\r\n url: \"https://mockbin.org/bin/336a8d99-9e09-4f1f-979d-851a6d1b1423\"\r\n valueLocation: \"tasks\"", + "advanced:\r\n modifiers:\r\n target: \"2\"\r\n formula: \"xxx\"\r\n external:\r\n - name: grpc_one\r\n url: localhost\r\n timeout: 10" + ] + } + }, + "metadata": { + "tags": [ + "keda", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "keda" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "sourceIssue": 
"https://github.com/kedacore/keda/pull/4821", + "sourceRepo": "kedacore/keda", + "reactions": 4, + "comments": 21 + }, + "security": { + "scannedAt": "2026-02-27T17:43:38.145Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keda/keda-5496-add-option-to-the-datadog-scaler-to-use-the-cluster-agent-as-proxy.json b/solutions/cncf-generated/keda/keda-5496-add-option-to-the-datadog-scaler-to-use-the-cluster-agent-as-proxy.json new file mode 100644 index 00000000..8f2038a4 --- /dev/null +++ b/solutions/cncf-generated/keda/keda-5496-add-option-to-the-datadog-scaler-to-use-the-cluster-agent-as-proxy.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:41.708Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keda: Add option to the Datadog scaler to use the Cluster Agent as proxy", + "description": "Add option to the Datadog scaler to use the Datadog Cluster Agent as proxy to obtain the metrics, instead of calling the REST API directly.\n\nThis avoids duplication and also improves the rate limiting issue that the Datadog scaler currently has, as the Cluster Agent gets the metric values in batches.\n\nImplements: #5355\n\n### Checklist\n\n- [x] Tests have been added\n- [x] Changelog has been updated and is aligned with our [changelog requirements](https://github.com/kedacore/keda/blob/main/CONTRIBUTING.md#Changelog)\n- [X] A PR is opened to update the documentation on ([repo](https://github.com/kedacore/keda-docs)) *(if applicable)*\n- [X] Commits are signed with Developer Certificate of Origin (DCO - [learn more](https://github.com/kedacore/keda/blob/main/CONTRIBUTING.md#developer-certificate-of-origin-signing-your-work))\n\nFixes #5355\n\nDocs: https://github.com/kedacore/keda-docs/pull/1310", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/run-e2e datadog\n**Update:** 
You can check the progress [here](https://github.com/kedacore/keda/actions/runs/8634057347)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "keda", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "keda" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kedacore/keda/pull/5496", + "sourceRepo": "kedacore/keda", + "reactions": 4, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:43:41.708Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keda/keda-5571-fix-allow-scaledobject-minreplicacount-to-be-set-to-0.json b/solutions/cncf-generated/keda/keda-5571-fix-allow-scaledobject-minreplicacount-to-be-set-to-0.json new file mode 100644 index 00000000..54d27f29 --- /dev/null +++ b/solutions/cncf-generated/keda/keda-5571-fix-allow-scaledobject-minreplicacount-to-be-set-to-0.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:40.554Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keda: fix: Allow ScaledObject.MinReplicaCount to be set to 0", + "description": "Makes ScaledObject API to be closer to ScaleJob API.\nRenames functions to MinReplicaCount and MaxReplicaCount.\nUpdates doc strings to correspond new names.\nSet MinReplicaCount default to 0 as written in the doc:\nhttps://keda.sh/docs/1.5/concepts/scaling-deployments/#overview.\nMoves HPA related hacks into hpa.go.\n\nFixes: #5570.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/run-e2e\n**Update:** You can check the progress [here](https://github.com/kedacore/keda/actions/runs/8186018195)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + 
}, + "metadata": { + "tags": [ + "keda", + "graduated", + "app-definition", + "stale" + ], + "category": "workloads", + "cncfProjects": [ + "keda" + ], + "targetResourceKinds": [ + "Deployment", + "Job" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kedacore/keda/pull/5571", + "sourceRepo": "kedacore/keda", + "reactions": 4, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:43:40.554Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keda/keda-6169-refactor-selenium-grid-scaler.json b/solutions/cncf-generated/keda/keda-6169-refactor-selenium-grid-scaler.json new file mode 100644 index 00000000..0dec25d0 --- /dev/null +++ b/solutions/cncf-generated/keda/keda-6169-refactor-selenium-grid-scaler.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:33.495Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keda: Refactor Selenium Grid scaler", + "description": "_Provide a description of what has been changed_\n\n1. **Selenium Grid Scaler**: Improve logic based on node stereotypes, node sessions and queue requests capabilities ([#6080](https://github.com/kedacore/keda/issues/6080))\n - Enhanced KEDA scaler behavior, addressing:\n - Node with different platformName scaling: Fixed incorrect scaling behavior when both Linux and Windows node stereotypes are present ([#1925](https://github.com/SeleniumHQ/docker-selenium/issues/1925)).\n - Excessive autoscaling: Resolved over-scaling of browser nodes ([#2160](https://github.com/SeleniumHQ/docker-selenium/issues/2160)).\n2. Added one more parameter `nodeMaxSessions` (default is 1), to configure the scaler to be aligned with option `--max-sessions` (env var `SE_NODE_MAX_SESSIONS`) in the Node. It helps the calculation be better.\n3. 
**Selenium Grid Scaler**: Support for Username and Password Authentication of Grid GraphQL endpoint ([#6144](https://github.com/kedacore/keda/issues/6144))\n4. Update all u", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Fix scale with windows nodes in cluster\n\n### Checklist\n\n- [x] When introducing a new scaler, I agree with the [scaling governance policy](https://github.com/kedacore/governance/blob/main/SCALERS.md)\n- [x] I have verified that my change is according to the [deprecations & breaking changes policy](https://github.com/kedacore/governance/blob/main/DEPRECATIONS.md)\n- [x] Tests have been added\n- [x] Changelog has been updated and is aligned with our [changelog requirements](https://github.com/kedacore/keda/blob/main/CONTRIBUTING.md#Changelog)\n- [x] A PR is opened to update our Helm chart ([repo](https://github.com/kedacore/charts)) *(if applicable, ie. when deployment manifests are modified)*\n- [x] A PR is opened to update the documentation on ([repo](https://github.com/kedacore/keda-docs)) *(if applicable)*\n- [x] Commits are signed with Developer Certificate of Origin (DCO - [learn more](https://github.com/kedacore/keda/blob/main/CONTRIBUTING.md#developer-certificate-of-origin-signing-your-work))\n\nFixes #4908\n\nRelates to #", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "keda", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "keda" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/kedacore/keda/pull/6169", + "sourceRepo": "kedacore/keda", + "reactions": 7, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:43:33.495Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keda/keda-6191-add-scaler-for-temporal.json 
b/solutions/cncf-generated/keda/keda-6191-add-scaler-for-temporal.json new file mode 100644 index 00000000..d963d187 --- /dev/null +++ b/solutions/cncf-generated/keda/keda-6191-add-scaler-for-temporal.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:29.861Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keda: add scaler for temporal", + "description": "Implement a temporal scaler\n\n### Checklist\n\n- [x] When introducing a new scaler, I agree with the [scaling governance policy](https://github.com/kedacore/governance/blob/main/SCALERS.md)\n- [x] Tests have been added\n- [x] Commits are signed with Developer Certificate of Origin (DCO - [learn more](https://github.com/kedacore/keda/blob/main/CONTRIBUTING.md#developer-certificate-of-origin-signing-your-work))\n- [x] I have verified that my change is according to the [deprecations & breaking changes policy](https://github.com/kedacore/governance/blob/main/DEPRECATIONS.md)\n- [N/A] A PR is opened to update our Helm chart ([repo](https://github.com/kedacore/charts)) *(if applicable, ie. 
when deployment manifests are modified)*\n- [x] Changelog has been updated and is aligned with our [changelog requirements](https://github.com/kedacore/keda/blob/main/CONTRIBUTING.md#Changelog)\n- [x] A PR is opened to update the documentation on ([repo](https://github.com/kedacore/keda-docs)) *(if applicable)*\n\nDo", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Implement a temporal scaler \n\n### Checklist\n\n- [x] When introducing a new scaler, I agree with the [scaling governance policy](https://github.com/kedacore/governance/blob/main/SCALERS.md)\n- [x] I have verified that my change is according to the [deprecations & breaking changes policy](https://github.com/kedacore/governance/blob/main/DEPRECATIONS.md)\n- [x] Tests have been added\n- [x] Changelog has been updated and is aligned with our [changelog requirements](https://github.com/kedacore/keda/blob/main/CONTRIBUTING.md#Changelog)\n- [ ] A PR is opened to update our Helm chart ([repo](https://github.com/kedacore/charts)) *(if applicable, ie. 
when deployment manifests are modified)*\n- [x] A PR is opened to update the documentation on ([repo](https://github.com/kedacore/keda-docs)) *(if applicable)*\n- [x] Commits are signed with Developer Certificate of Origin (DCO - [learn more](https://github.com/kedacore/keda/blob/main/CONTRIBUTING.md#developer-certificate-of-origin-signing-your-work))\n\nRelates to #4724", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "2024-10-08T08:54:27Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:54:57Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:55:27Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:55:57Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:56:27Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:56:57Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": 
\"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:57:27Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:57:57Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:57:57Z INFO scaleexecutor Successfully updated ScaleTarget {\"scaledobject.Name\": \"connector-mysql-2d9c6761\", \"scaledObject.Namespace\": \"borneo\", \"scaleTarget.Name\": \"connector-mysql-2d9c6761\", \"Original Replicas Count\": 0, \"New Replicas Count\": 1}\r\n2024-10-08T08:57:58Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:58:13Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:58:27Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:58:28Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:58:43Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", 
\"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:58:57Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:58:58Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:59:13Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:59:27Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}", + "temporal_test.go:268:\r\n \tError Trace:\t/Users/robholland/Developer/github.com/kedacore/keda/tests/scalers/temporal/temporal_test.go:268\r\n \t \t\t\t\t/Users/robholland/Developer/github.com/kedacore/keda/tests/scalers/temporal/temporal_test.go:241\r\n \tError: \tShould be true\r\n \tTest: \tTestTemporalScaler\r\n \tMessages: \treplica count should be 1 after 3 minutes", + "> temporal_test.go:268:\r\n> \tError Trace:\t/Users/robholland/Developer/github.com/kedacore/keda/tests/scalers/temporal/temporal_test.go:268\r\n> \t \t\t\t\t/Users/robholland/Developer/github.com/kedacore/keda/tests/scalers/temporal/temporal_test.go:241\r\n> \tError: \tShould be true\r\n> \tTest: \tTestTemporalScaler\r\n> \tMessages: \treplica count should be 1 after 3 minutes\r\n>" + ] + } + }, + "metadata": { + "tags": [ + "keda", + "graduated", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "keda" + ], + 
"targetResourceKinds": [ + "Deployment" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/kedacore/keda/pull/6191", + "sourceRepo": "kedacore/keda", + "reactions": 13, + "comments": 43 + }, + "security": { + "scannedAt": "2026-02-27T17:43:29.862Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keda/keda-6499-add-gitlab-runner-scaler.json b/solutions/cncf-generated/keda/keda-6499-add-gitlab-runner-scaler.json new file mode 100644 index 00000000..6a6ba675 --- /dev/null +++ b/solutions/cncf-generated/keda/keda-6499-add-gitlab-runner-scaler.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:44.272Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keda: Add gitlab-runner-scaler", + "description": "_Provide a description of what has been changed_\n\n### Checklist\n\n- [x] When introducing a new scaler, I agree with the [scaling governance policy](https://github.com/kedacore/governance/blob/main/SCALERS.md)\n- [x] I have verified that my change is according to the [deprecations & breaking changes policy](https://github.com/kedacore/governance/blob/main/DEPRECATIONS.md)\n- [x] Tests have been added\n- [x] Changelog has been updated and is aligned with our [changelog requirements](https://github.com/kedacore/keda/blob/main/CONTRIBUTING.md#Changelog)\n- [ ] A PR is opened to update our Helm chart ([repo](https://github.com/kedacore/charts)) *(if applicable, ie. 
when deployment manifests are modified)*\n- [ ] A PR is opened to update the documentation on ([repo](https://github.com/kedacore/keda-docs)) *(if applicable)*\n- [x] Commits are signed with Developer Certificate of Origin (DCO - [learn more](https://github.com/kedacore/keda/blob/main/CONTRIBUTING.md#developer-certificate-of-origin-sign", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/run-e2e gitlab\n**Update:** You can check the progress [here](https://github.com/kedacore/keda/actions/runs/14091638961)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "keda", + "graduated", + "app-definition", + "waiting-author-response", + "stale" + ], + "category": "workloads", + "cncfProjects": [ + "keda" + ], + "targetResourceKinds": [ + "Deployment" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kedacore/keda/pull/6499", + "sourceRepo": "kedacore/keda", + "reactions": 4, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:43:44.272Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keda/keda-6655-feat-add-fallback-support-for-value-metric-type.json b/solutions/cncf-generated/keda/keda-6655-feat-add-fallback-support-for-value-metric-type.json new file mode 100644 index 00000000..08397955 --- /dev/null +++ b/solutions/cncf-generated/keda/keda-6655-feat-add-fallback-support-for-value-metric-type.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:43.097Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keda: feat: add fallback support for value metric type", + "description": "* Since the constraint on having fallback only for `AverageValue` seems to me kinda unwarranted, it's here relaxed a bit, and can be removed altogether if we opt 
for another implementation.\n\n The constraint now becomes that the `scaleTargetRef` object has a field `.status.readyReplicas` that its controller updates with the number of ready replicas, so that we can directly use that. This is de facto the case with `Deployments`/`StatefulSets`/`Replicasets`/Argo `Rollouts`.\n \n We can then generically fetch the object as `unstructured` and access the value of the field to divide by it. A brief math illustration starting with the HPA's equation\n\n `desiredReplicas = ceil [currentReplicas * (currentMetricValue/desiredMetricValue) ]`\n\n By passing `currentMetricValue = desiredMetricValue * fallbackReplicas / currentReplicas`\n \n We end up with\n\n `desiredReplicas = ceil [currentReplicas * (( desiredMetricValue * fallbackReplicas / currentReplicas )/desiredMetricValue) ]`\n\n", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This is interesting indeed, the limitation of the metric type is something to fix. Sorry for not reviewing the PR, it was missed on my notification, I'm going to review it", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "keda", + "graduated", + "app-definition", + "ok-to-merge" + ], + "category": "workloads", + "cncfProjects": [ + "keda" + ], + "targetResourceKinds": [ + "Pod", + "Deployment", + "StatefulSet", + "ReplicaSet" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/kedacore/keda/pull/6655", + "sourceRepo": "kedacore/keda", + "reactions": 4, + "comments": 36 + }, + "security": { + "scannedAt": "2026-02-27T17:43:43.097Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kepler/kepler-1438-chore-pkg-bpf-replace-libbpfgo-with-cilium-ebpf.json b/solutions/cncf-generated/kepler/kepler-1438-chore-pkg-bpf-replace-libbpfgo-with-cilium-ebpf.json new file mode 100644 index
00000000..53844d3c --- /dev/null +++ b/solutions/cncf-generated/kepler/kepler-1438-chore-pkg-bpf-replace-libbpfgo-with-cilium-ebpf.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:37.706Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kepler: chore(pkg/bpf): Replace libbpfgo with cilium/ebpf", + "description": "cilium/ebpf is a pure Go eBPF package and is used in a number of popular\ncloud-native projects. The benefits to Kepler are:\n\n1. Bytecode is built using bpf2go and the C and Go structs are kept in\n sync automatically\n2. There is no requirement for Cgo anymore and therefore no requirement\n to have libbpf or libelf installed to compile and/or to be\n dynamically linked at runtime\n3. Simplified packaging as the correct bytecode is contained within the\n kepler binary\n\nOverall I'm happy with this change, but there is only one thing that\nbugs me.\n\nWe have to check in the bytecode object files (e.g kepler.bpfel.o) or\nthe Go tooling (go lint/go vet) complains about the missing files.\nI couldn't reliably get `go generate ./...` to work to compile these\nfiles in CI. This is something which should be relatively easy to fix\nin the Makefile/CI environment before we cut a release.\n\nDepends-On: #1435 \nFixes: #1424", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "As stated in PR #1468, the ebpf metric collection is not correct.\n\nThis PR is fixing multiple problems:\n\n- Fixed incorrect padding in some structures to ensure it is 8 bytes (unsigned long long).\n- Updated the schedule switch to collect only on-CPU time and ignore idle time. As detailed [here](https://litux.nl/mirror/kerneldevelopment/0672327201/ch03lev1sec1.html), during a schedule switch, the previous task might not have been in the running state. 
Inspired by the libbpf-tools [cpudist](https://github.com/iovisor/bcc/blob/c93a19aaf3f7eae9d6c9070309cc785c18575767/libbpf-tools/cpudist.bpf.c#L100), I modified the code to save data only if the task was previously running, thereby excluding idle or blocked times. Collecting data from a non-running task could lead to gathering hardware counters from another task that was actually running on the CPU.\n- Updated metric collection to gather hardware counter metrics only when CPU time can be reliably collected. This ensures the metrics are correlated, addressing issues where the start timestamp of the task might be missing or there could be clock issues.\n- Minimized Kepler overhead by aggregating thread data into process IDs within the eBPF code (kernel space). Note that the Kernel tgid is the user-space PID, and the Kernel pid is the user-scape TID.\n- Changed the eBPF code to use microseconds instead of milliseconds to improve precision and accurately identify when a process was not active.\n\nWith the fixes, both the `scaphandre` and ", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "❯ sudo ./_output/bin/kepler\r\n\r\nI0522 09:33:18.758775 2437499 gpu.go:38] Trying to initialize GPU collector using dcgm\r\nW0522 09:33:18.758911 2437499 gpu_dcgm.go:104] There is no DCGM daemon running in the host: libdcgm.so not Found\r\nW0522 09:33:18.758976 2437499 gpu_dcgm.go:108] Could not start DCGM. Error: libdcgm.so not Found\r\nI0522 09:33:18.758997 2437499 gpu.go:45] Error initializing dcgm: not able to connect to DCGM: libdcgm.so not Found\r\nI0522 09:33:18.759008 2437499 gpu.go:38] Trying to initialize GPU collector using nvidia-nvml\r\nI0522 09:33:18.759111 2437499 gpu.go:45] Error initializing nvidia-nvml: failed to init nvml. 
ERROR_LIBRARY_NOT_FOUND\r\nI0522 09:33:18.759121 2437499 gpu.go:38] Trying to initialize GPU collector using dummy\r\nI0522 09:33:18.759134 2437499 gpu.go:42] Using dummy to obtain gpu power\r\nI0522 09:33:18.760886 2437499 exporter.go:85] Kepler running on version: v0.7.10-5-gf1097c0e2c498-dirty\r\nI0522 09:33:18.760897 2437499 config.go:283] using gCgroup ID in the BPF program: true\r\nI0522 09:33:18.760963 2437499 config.go:285] kernel version: 6.7\r\nI0522 09:33:18.761045 2437499 config.go:310] The Idle power will be exposed. Are you running on Baremetal or using single VM per node?\r\nI0522 09:33:18.761127 2437499 redfish.go:169] failed to get redfish credential file path\r\nI0522 09:33:18.761632 2437499 acpi.go:71] Could not find any ACPI power meter path. Is it a VM?\r\nI0522 09:33:18.763071 2437499 exporter.go:94] Number of CPUs: 8\r\nI0522 09:33:19.257525 2437499 watcher.go:67] Using in cluster k8s config\r\nI0522 09:33:19.257541 2437499 watcher.go:74] failed to get config: unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined\r\nI0522 09:33:19.257553 2437499 watcher.go:126] k8s APIserver watcher was not enabled\r\nI0522 09:33:19.257642 2437499 prometheus_collector.go:96] Registered Container Prometheus metrics\r\nI0522 09:33:19.257688 2437499 prometheus_collector.go:101] Registered VM Prometheus metrics\r\nI0522 09:33:19.257724 2437499 prometheus_collector.go:105] Registered Node Prometheus metrics\r\nI0522 09:33:19.257826 2437499 node_platform_energy.go:54] Failed to create Regressor/AbsPower Power Model to estimate Node Platform Power: open /var/lib/kepler/data/acpi_AbsPowerModel.json: no such fi\r\nle or directory\r\nI0522 09:33:19.257965 2437499 exporter.go:175] starting to listen on 0.0.0.0:8888\r\nI0522 09:33:19.257980 2437499 exporter.go:181] Started Kepler in 497.120003ms\r\npanic: runtime error: invalid memory address or nil pointer dereference\r\n[signal SIGSEGV: segmentation violation 
code=0x1 addr=0x0 pc=0x854f72]\r\n\r\ngoroutine 53 [running]:\r\ngithub.com/sustainable-computing-io/kepler/pkg/collector/stats/types.(*UInt64StatCollection).AddDeltaStat(0x0, {0x18b8388, 0x7}, 0x0)\r\n kepler/pkg/collector/stats/types/types.go:108 +0x32\r\ngithub.com/sustainable-computing-io/kepler/pkg/collector/resourceutilization/bpf.updateSWCounters(0x16a0640?, 0xc000190150, 0x1?)\r\n kepler/pkg/collector/resourceutilization/bpf/process_bpf_collector.go:40 +0xc5\r\ngithub.com/sustainable-computing-io/kepler/pkg/collector/resourceutilization/bpf.UpdateProcessBPFMetrics({0x1b40fe8, 0xc000692f00}, 0xc0000736d0?)\r\n kepler/pkg/collector/resourceutilization/bpf/process_bpf_collector.go:129 +0xa39\r\ngithub.com/sustainable-computing-io/kepler/pkg/collector.(*Collector).updateProcessResourceUtilizationMetrics(0xc002bfe4c0, 0x0?)\r\n kepler/pkg/collector/metric_collector.go:191 +0x5d\r\ngithub.com/sustainable-computing-io/kepler/pkg/collector.(*Collector).updateResourceUtilizationMetrics(0xc002bfe4c0)\r\n kepler/pkg/collector/metric_collector.go:155 +0x54\r\ngithub.com/sustainable-computing-io/kepler/pkg/collector.(*Collector).Update(0xb2d05e00?)\r\n kepler/pkg/collector/metric_collector.go:106 +0x53\r\ngithub.com/sustainable-computing-io/kepler/pkg/manager.(*CollectorManager).Start.func1()\r\n kepler/pkg/manager/manager.go:74 +0x7b\r\ncreated by github.com/sustainable-computing-io/kepler/pkg/manager.(*CollectorManager).Start in goroutine 1\r\n kepler/pkg/manager/manager.go:66 +0x65", + "❯ cd manifests/compose/dev\r\n❯ docker compose up --build kepler-dev\r\n\r\nkepler-dev-1 | I0521 23:32:42.275436 2436610 process_bpf_collector.go:88] process worker (pid=2202720, cgroup=1466240) has 0 task clock time 2344773 CPU cycles, 883246 instructions, 232766 cache miss\r\nes, 68 page cache hits\r\nkepler-dev-1 | panic: runtime error: invalid memory address or nil pointer dereference\r\nkepler-dev-1 | [signal SIGSEGV: segmentation violation code=0x1 addr=0x0 
pc=0x8788f2]\r\nkepler-dev-1 |\r\nkepler-dev-1 | goroutine 30 [running]:\r\nkepler-dev-1 | github.com/sustainable-computing-io/kepler/pkg/collector/stats/types.(*UInt64StatCollection).AddDeltaStat(0x0, {0x18ee6d1, 0x7}, 0x0)\r\nkepler-dev-1 | /workspace/pkg/collector/stats/types/types.go:108 +0x32\r\nkepler-dev-1 | github.com/sustainable-computing-io/kepler/pkg/collector/resourceutilization/bpf.updateSWCounters(0x16d05e0?, 0xc0003e01c0, 0x219c60?)\r\nkepler-dev-1 | /workspace/pkg/collector/resourceutilization/bpf/process_bpf_collector.go:40 +0xc5\r\nkepler-dev-1 | github.com/sustainable-computing-io/kepler/pkg/collector/resourceutilization/bpf.UpdateProcessBPFMetrics({0x1b7aea8, 0xc0005977c0}, 0xc00048aed0?)\r\nkepler-dev-1 | /workspace/pkg/collector/resourceutilization/bpf/process_bpf_collector.go:129 +0xa39\r\nkepler-dev-1 | github.com/sustainable-computing-io/kepler/pkg/collector.(*Collector).updateProcessResourceUtilizationMetrics(0xc002bcc500, 0x0?)\r\nkepler-dev-1 | /workspace/pkg/collector/metric_collector.go:191 +0x5d\r\nkepler-dev-1 | github.com/sustainable-computing-io/kepler/pkg/collector.(*Collector).updateResourceUtilizationMetrics(0xc002bcc500)\r\nkepler-dev-1 | /workspace/pkg/collector/metric_collector.go:155 +0x54\r\nkepler-dev-1 | github.com/sustainable-computing-io/kepler/pkg/collector.(*Collector).Update(0xb2d05e00?)\r\nkepler-dev-1 | /workspace/pkg/collector/metric_collector.go:106 +0x53\r\nkepler-dev-1 | github.com/sustainable-computing-io/kepler/pkg/manager.(*CollectorManager).Start.func1()\r\nkepler-dev-1 | /workspace/pkg/manager/manager.go:74 +0x7b\r\nkepler-dev-1 | created by github.com/sustainable-computing-io/kepler/pkg/manager.(*CollectorManager).Start in goroutine 1\r\nkepler-dev-1 | /workspace/pkg/manager/manager.go:66 +0x65\r\nkepler-dev-1 exited with code 2", + "Which is used by:" + ] + } + }, + "metadata": { + "tags": [ + "kepler", + "sandbox", + "observability" + ], + "category": "observability", + "cncfProjects": [ + "kepler" + ], 
+ "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/sustainable-computing-io/kepler/pull/1438", + "sourceRepo": "sustainable-computing-io/kepler", + "reactions": 1, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:48:37.706Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keycloak/keycloak-10754-allow-importing-realms-during-startup.json b/solutions/cncf-generated/keycloak/keycloak-10754-allow-importing-realms-during-startup.json new file mode 100644 index 00000000..c7578db5 --- /dev/null +++ b/solutions/cncf-generated/keycloak/keycloak-10754-allow-importing-realms-during-startup.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:19.769Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keycloak: Allow importing realms during startup", + "description": "* Re-enable the ability to import realms at startup similarly as when using the legacy distribution and the `keycloak.import` system property\n* Adds an `import-realm` option to both `start` and `start-dev` commands to import realm configuration files from the `data/import` directory. 
No need to manually specify files, reducing the chance of errors, and reducing the number of steps when importing realms using containers.\n* A new guide to cover the different strategies for importing and exporting realms.\n\nCloses #9261", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "fix #17850\n\nUpgrade keycloak to 17.0.0\n\nBreaking changes:\n- Keycloak upgrade introduced many breaking changes the main ones being:\n- the removal of /auth by default.\n- Moving from wildfly to quarkus which changes how ports are mapped, the server is configured...\n\nAlso when starting keycloak the first time with a script it runs it and stops, you need to restart keycloak for it keep running.\n\n---\n\nPlease make sure the below checklist is followed for Pull Requests.\n\n- [X] [All continuous integration tests](https://github.com/jhipster/generator-jhipster/actions) are green\n- [X] Tests are added where necessary\n- [X] The JDL part is updated if necessary\n- [X] [jhipster-online](https://github.com/jhipster/jhipster-online) is updated if necessary\n- [X] Documentation is added/updated where necessary\n- [X] Coding Rules & Commit Guidelines as per our [CONTRIBUTING.md document](https://github.com/jhipster/generator-jhipster/blob/main/CONTRIBUTING.md) are followed", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "keycloak", + "incubating", + "security" + ], + "category": "security", + "cncfProjects": [ + "keycloak" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/keycloak/keycloak/pull/10754", + "sourceRepo": "keycloak/keycloak", + "reactions": 14, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:44:19.769Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git 
a/solutions/cncf-generated/keycloak/keycloak-17525-fix-negative-expire-values-for-refresh-tokens-and-expire-client-s.json b/solutions/cncf-generated/keycloak/keycloak-17525-fix-negative-expire-values-for-refresh-tokens-and-expire-client-s.json new file mode 100644 index 00000000..40aad691 --- /dev/null +++ b/solutions/cncf-generated/keycloak/keycloak-17525-fix-negative-expire-values-for-refresh-tokens-and-expire-client-s.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:28.183Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keycloak: Fix negative expire values for refresh tokens and expire client session when user session", + "description": "Closes https://github.com/keycloak/keycloak/issues/14854\nCloses https://github.com/keycloak/keycloak/issues/11990\n\nI have joined both PRs into one. They are related and modify the same code area. Adding tests to check both. I have respected the original authors.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Adding user session creation time when calculating client session lifespan\n\nNew client session note is added storing information about start timestamp of associated user session\n\nCloses #14854", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "java.lang.AssertionError: expected null, but was:\n\tat org.junit.Assert.fail(Assert.java:89)\n\tat org.junit.Assert.failNotNull(Assert.java:756)\n\tat org.junit.Assert.assertNull(Assert.java:738)\n\tat org.junit.Assert.assertNull(Assert.java:748)\n...", + "It failed in the [4th run](https://github.com/rmartinc/keycloak/actions/runs/5131184206/jobs/9231312787):", + "So, I don't think we can do much here. Except rechecking expiration, always, only if cross-dc,... Note that returning a expired session can cause negative times in tokens. 
I think that the flaky issue #16511 is the same cause.\r\n\r\nCorrection: This is just idle, not expiration. So maybe it's no so important. I'm going to check if I can improve the test to test N times it's expired by idle.\r\n\r\n\n@rmartinc The updated version in now green after 10 runs https://github.com/martin-kanis/keycloak/actions/runs/5130604990/jobs/9229608348\n> @rmartinc The updated version in now green after 10 runs https://github.com/martin-kanis/keycloak/actions/runs/5130604990/jobs/9229608348\r\n\r\n:heart_eyes: Great! But if you see `SessionTimeoutsTest#testOfflineUserClientIdleTimeoutSmallerThanSessionNoRefresh` is flaky. The reason is in my previous comment. Maybe checking idle so rigorously is not OK, I can improve the test to do N checks inside, and only fail if the idle is wrong after the N times. WDYT? \nOK, I have tested different things with the CI (as I cannot reproduce the issue in my laptop) with the profile `legacy-jpa+cross-dc-infinispan-offline-sessions-preloading` and trying to execute a single test 100 times:\r\n\r\n* testOfflineUserClientIdleTimeoutSmallerThanSessionNoRefresh: failed at 4th (check idle without any refresh).\r\n* testOfflineUserClientIdleTimeoutSmallerThanSessionOneRefresh: failed at 31st (check idle with one refresh).\r\n* testOfflineUserClientMaxLifespanSmallerThanSession: failed at 54th run (check max lifespan).\r\n\r\nSo, it's not just idle, it can happen in max lifespan too. It seems to be more probable with idle, but it also happens with max lifespan. So I'm not doing anything more, I will wait for @martin-kanis comments. As commented the only thing I see to improve this at keycloak side is always checking expiration before returning the sessions (or at least in some scenarios like cross-dc).\r\n\r\nNOTE: In my laptop I have executed `testOfflineUserClientIdleTimeoutSmallerThanSessionNoRefresh` 100 times without any issue. 
So maybe this is a just a problem of the CI env (multicast issues or similar on github actions nodes).\r\n\r\nCheers!\n## Unreported flaky test detected\nIf the below flaky tests below are affected by the changes, please review and update the changes accordingly. Otherwise, a maintainer should report the flaky tests prior to merging the PR.\n\n### org.keycloak.testsuite.model.session.SessionTimeoutsTest#testOfflineUserClientIdleTimeoutSmallerThanSessionNoRefresh\n\n[Keycloak CI - Store Model Tests]()" + ] + } + }, + "metadata": { + "tags": [ + "keycloak", + "incubating", + "security", + "team-cloud-native", + "team-ui", + "flaky-test" + ], + "category": "security", + "cncfProjects": [ + "keycloak" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/keycloak/keycloak/pull/17525", + "sourceRepo": "keycloak/keycloak", + "reactions": 11, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:44:28.183Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keycloak/keycloak-22317-added-accountresource-spi-provider-and-providerfactory.json b/solutions/cncf-generated/keycloak/keycloak-22317-added-accountresource-spi-provider-and-providerfactory.json new file mode 100644 index 00000000..39749f8d --- /dev/null +++ b/solutions/cncf-generated/keycloak/keycloak-22317-added-accountresource-spi-provider-and-providerfactory.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:25.803Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keycloak: added AccountResource SPI, Provider and ProviderFactory. 
", + "description": "updated AccountLoader to load provider(s) and check if it is compatible with the chosen theme.\n\nCloses #22318", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Quickstart demonstrating overriding of Account Console resources \nIssue: https://github.com/keycloak/keycloak/issues/22318\nPR: https://github.com/keycloak/keycloak/pull/22317\n\n@garronej @ssilvert", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "# build everything\r\nmvn clean install -DskipTests=true\r\ncd distribution\r\nmvn clean install\r\n\r\n# run testsuite for embedded\r\ncd ..\r\nmvn -f testsuite/integration-arquillian/pom.xml clean install", + "mvn -f testsuite/integration-arquillian/pom.xml clean install -Pauth-server-quarkus -Denforcer.skip -Dtest=OIDCProtocolMappersTest", + "org.openqa.selenium.NoSuchElementException: \nno such element: Unable to locate element: {\"method\":\"css selector\",\"selector\":\"#username\"}\n (Session info: headless chrome=117.0.5938.92)\nFor documentation on this error, please visit: http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: version: '3.14.0', revision: 'aacccce0', time: '2018-08-02T20:19:58.91Z'\n..." 
+ ] + } + }, + "metadata": { + "tags": [ + "keycloak", + "incubating", + "security", + "team-cloud-native", + "team-ui", + "flaky-test" + ], + "category": "security", + "cncfProjects": [ + "keycloak" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/keycloak/keycloak/pull/22317", + "sourceRepo": "keycloak/keycloak", + "reactions": 12, + "comments": 31 + }, + "security": { + "scannedAt": "2026-02-27T17:44:25.803Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keycloak/keycloak-23303-use-email-verification-instead-of-executing-action-for-send-verif.json b/solutions/cncf-generated/keycloak/keycloak-23303-use-email-verification-instead-of-executing-action-for-send-verif.json new file mode 100644 index 00000000..9db998cf --- /dev/null +++ b/solutions/cncf-generated/keycloak/keycloak-23303-use-email-verification-instead-of-executing-action-for-send-verif.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:30.428Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keycloak: Use email verification instead of executing action for `send-verify-email` endpoint", + "description": "Closes #15190\n\nAdd support for `send-verify-email` endpoint to use the `email-verification.ftl` instead of `executeActions.ftl`\n\nAlso introduce a new parameter `lifespan` to be able to override the default lifespan value (12 hours)", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Can we get a bump on this. 
This will fix an issue we are having", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "/realms/{realm}/login-actions/action-token?key={token}" + ] + } + }, + "metadata": { + "tags": [ + "keycloak", + "incubating", + "security", + "flaky-test" + ], + "category": "security", + "cncfProjects": [ + "keycloak" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/keycloak/keycloak/pull/23303", + "sourceRepo": "keycloak/keycloak", + "reactions": 10, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:44:30.428Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keycloak/keycloak-23517-12406-you-are-already-logged-in.json b/solutions/cncf-generated/keycloak/keycloak-23517-12406-you-are-already-logged-in.json new file mode 100644 index 00000000..56536680 --- /dev/null +++ b/solutions/cncf-generated/keycloak/keycloak-23517-12406-you-are-already-logged-in.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:31.649Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keycloak: 12406 you are already logged in", + "description": "Closes #12406 \nThis is the draft for fixing \"You are already logged-in\" for most of the cases and makes the usability around this better.\n\nSome more info about the issue also in https://issues.redhat.com/browse/KEYCLOAK-5179\n\nThis prototype uses approach we discussed with @stianst and it works like this:\n\n1) In current Keycloak main, when user is authenticated in some browser tab (EG. tab1), the whole `RootAuthenticationSessionModel` is removed from the `authenticationSessions` cache. 
This means that other browser tabs are screwed and cannot continue with authentication due the corresponding authentication sessions removed\n\n2) This PR uses the approach when it does not remove whole `RootAuthenticationSessionModel`, but just single `AuthenticationSessionModel` of the current browser tab. The root session (together with KC_RESTART cookie) is removed just when there are no other browser tabs opened (which means no more `AuthenticationSessionModel` in this root auth session). There is also", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## Unreported flaky test detected\nIf the below flaky tests below are affected by the changes, please review and update the changes accordingly. Otherwise, a maintainer should report the flaky tests prior to merging the PR.\n\n### org.keycloak.testsuite.webauthn.account.WebAuthnSigningInTest#createWebAuthnSameUserLabel\n\n[Keycloak CI - WebAuthn IT (chrome)]()\n\n```\njava.lang.AssertionError: Expected OIDCLogin but was (https://localhost:8543/auth/realms/test/protocol/openid-connect/auth?client_id=account-console&redirect_uri=https%3A%2F%2Flocalhost%3A8543%2Fauth%2Frealms%2Ftest%2Faccount%2F%23%2Fsecurity%2Fsigningin&state=b0b8a353-9750-4445-b7d9-b7a3fa39a08a&response_mode=fragment&response_type=code&scope=openid&nonce=a1e3fc90-b958-4eae-8df4-bf6cff8c19d0&code_challenge=7J5YR-iLv8XWJkrUsKvg7-jqhLQK6Uiu_mz67Fcx1l0&code_challenge_method=S256)\n\tat org.junit.Assert.fail(Assert.java:89)\n\tat org.junit.Assert.assertTrue(Assert.java:42)\n\tat org.keycloak.testsuite.page.AbstractPage.assertCurrent(AbstractPage.java:110)\n\tat jdk.internal.reflect.GeneratedMethodAccessor628.invoke(Unknown Source)\n...\n```\n[Report flaky 
test](https://github.com/keycloak/keycloak/issues/new?title=Flaky+test%3A+org.keycloak.testsuite.webauthn.account.WebAuthnSigningInTest%23createWebAuthnSameUserLabel&labels=flaky-test%2Carea%2Fci%2Ckind%2Fbug&body=%23%23+org.keycloak.testsuite.webauthn.account.WebAuthnSigningInTest%23createWebAuthnSameUserLabel%0A%5BKeycloak+CI+-+WebAuthn+IT+%28chrome%29%5D%28%29+%2F+%5BPull+Reques", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "java.lang.AssertionError: Expected OIDCLogin but was (https://localhost:8543/auth/realms/test/protocol/openid-connect/auth?client_id=account-console&redirect_uri=https%3A%2F%2Flocalhost%3A8543%2Fauth%2Frealms%2Ftest%2Faccount%2F%23%2Fsecurity%2Fsigningin&state=b0b8a353-9750-4445-b7d9-b7a3fa39a08a&response_mode=fragment&response_type=code&scope=openid&nonce=a1e3fc90-b958-4eae-8df4-bf6cff8c19d0&code_challenge=7J5YR-iLv8XWJkrUsKvg7-jqhLQK6Uiu_mz67Fcx1l0&code_challenge_method=S256)\n\tat org.junit.Assert.fail(Assert.java:89)\n\tat org.junit.Assert.assertTrue(Assert.java:42)\n\tat org.keycloak.testsuite.page.AbstractPage.assertCurrent(AbstractPage.java:110)\n\tat jdk.internal.reflect.GeneratedMethodAccessor628.invoke(Unknown Source)\n...", + "org.awaitility.core.ConditionTimeoutException: Assertion condition defined as a org.keycloak.testsuite.util.URLAssert URL expected to begin with: https://localhost:8543/auth/realms/test/protocol/openid-connect/auth ; actual URL: https://localhost:8543/auth/realms/test/account/#/applications within 10 seconds.\n\tat org.awaitility.core.ConditionAwaiter.await(ConditionAwaiter.java:167)\n\tat org.awaitility.core.AssertionCondition.await(AssertionCondition.java:119)\n\tat org.awaitility.core.AssertionCondition.await(AssertionCondition.java:31)\n\tat org.awaitility.core.ConditionFactory.until(ConditionFactory.java:985)\n...", + "java.lang.AssertionError: Expected OIDCLogin but was 
(https://localhost:8543/auth/realms/test/protocol/openid-connect/auth?client_id=account-console&redirect_uri=https%3A%2F%2Flocalhost%3A8543%2Fauth%2Frealms%2Ftest%2Faccount%2F%23%2Fsecurity%2Fsigningin&state=4bb98792-22ed-4a5b-b419-2072c315ee79&response_mode=fragment&response_type=code&scope=openid&nonce=d8e96f28-cf46-49d2-866e-099fb7ea4862&code_challenge=3AF-8IqACysB70uGxOsCmIOzhGfz_qoNK2kdbJ3wDtU&code_challenge_method=S256)\n\tat org.junit.Assert.fail(Assert.java:89)\n\tat org.junit.Assert.assertTrue(Assert.java:42)\n\tat org.keycloak.testsuite.page.AbstractPage.assertCurrent(AbstractPage.java:110)\n\tat jdk.internal.reflect.GeneratedMethodAccessor628.invoke(Unknown Source)\n..." + ] + } + }, + "metadata": { + "tags": [ + "keycloak", + "incubating", + "security", + "team-cloud-native", + "team-ui", + "flaky-test" + ], + "category": "security", + "cncfProjects": [ + "keycloak" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/keycloak/keycloak/pull/23517", + "sourceRepo": "keycloak/keycloak", + "reactions": 8, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:44:31.649Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keycloak/keycloak-24512-implements-the-jwt-bearer-grant-type-as-described-in-rfc-7523-for.json b/solutions/cncf-generated/keycloak/keycloak-24512-implements-the-jwt-bearer-grant-type-as-described-in-rfc-7523-for.json new file mode 100644 index 00000000..f6173672 --- /dev/null +++ b/solutions/cncf-generated/keycloak/keycloak-24512-implements-the-jwt-bearer-grant-type-as-described-in-rfc-7523-for.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:16.180Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keycloak: Implements the jwt-bearer grant type as described in RFC 7523 for uti…", + "description": 
"Implements the jwt-bearer grant type as described in RFC 7523 for utilizing JWTs as authorization grants.\n\nThis implementation adds an additional Assertion Grant grant type for clients which enables the client to submit signed JWTs to the token endpoint and receive an access token for the specified user. It also adds a configuration block for OIDC clients where administrators can configure trusted issuer configurations.\n\nWhen Keycloak receives an assertion grant request from a client, it will verify the signed JWT against the certificate, issuer, and audience specified in the trusted issuer configs and check the client has the 'impersonate' permission for the specified user. If no config successfully validates the token or if the client is not allowed to impersonate the specified user, the request fails.\n\nCloses #24509", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Some thoughts on performance testing and persistent sessions: For now this PR is using transient user sessions that live only for a single request. For this, no additional performance testing is needed. So I'm removing myself as a reviewer here. \n\nSome discussions above ask for the token introspection to work, and want persistent sessions to be enabled. I see that the `ClientCredentialsGrantType` has a switch `useRefreshToken` which then creates persistent user sessions, and the discussion above seems to go into a similar direction (not sure if `useRefreshToken` would be a good name for this option here, or if it ever was for `ClientCredentialsGrantType`). \n\nI don't know how token introspection works for client credentials grants, maybe that would give some insights. \nMaking user sessions persistent would be a high price for using the token introspection endpoint as it would either bloat the memory, or add database IO each time they are written to the database. 
To me, this would be a separate PR and discussion as also Pedro suggested above.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "java.lang.RuntimeException: Could not create statement\n\tat org.jboss.arquillian.junit.Arquillian.methodBlock(Arquillian.java:313)\n\tat org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)\n\tat org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)\n\tat org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)\n...", + "java.lang.RuntimeException: Could not create statement\n\tat org.jboss.arquillian.junit.Arquillian.methodBlock(Arquillian.java:313)\n\tat org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)\n\tat org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)\n\tat org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)\n..." + ] + } + }, + "metadata": { + "tags": [ + "keycloak", + "incubating", + "security", + "team-cloud-native", + "team-ui", + "flaky-test", + "team-core-clients", + "team-core-iam", + "team-core-shared" + ], + "category": "security", + "cncfProjects": [ + "keycloak" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/keycloak/keycloak/pull/24512", + "sourceRepo": "keycloak/keycloak", + "reactions": 17, + "comments": 33 + }, + "security": { + "scannedAt": "2026-02-27T17:44:16.180Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keycloak/keycloak-31275-fix-persist-refresh-token-for-idp-token-exchange.json b/solutions/cncf-generated/keycloak/keycloak-31275-fix-persist-refresh-token-for-idp-token-exchange.json new file mode 100644 index 00000000..16d987b2 --- /dev/null +++ b/solutions/cncf-generated/keycloak/keycloak-31275-fix-persist-refresh-token-for-idp-token-exchange.json 
@@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:29.210Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keycloak: fix: persist refresh token for IDP token exchange", + "description": "Closes #39502\n\nSave the newly refreshed token in the database, so that on keycloak restart/session purge scenario, subsequent token refresh exchanges always uses the updated refresh token.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Thank you @thomasdarimont for having a look here. I see that the test case `testRefreshTokenPersistence` which is added in this PR fails. While the test is necessary to show that this works as expected, the test would need to pass before this PR can be merged.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "java.lang.AssertionError: expected: but was:\n\tat org.junit.Assert.fail(Assert.java:89)\n\tat org.junit.Assert.failNotEquals(Assert.java:835)\n\tat org.junit.Assert.assertEquals(Assert.java:120)\n\tat org.junit.Assert.assertEquals(Assert.java:146)\n..." 
+ ] + } + }, + "metadata": { + "tags": [ + "keycloak", + "incubating", + "security", + "team-cloud-native", + "flaky-test", + "team-core-clients", + "team-core-iam" + ], + "category": "security", + "cncfProjects": [ + "keycloak" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/keycloak/keycloak/pull/31275", + "sourceRepo": "keycloak/keycloak", + "reactions": 11, + "comments": 21 + }, + "security": { + "scannedAt": "2026-02-27T17:44:29.211Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keycloak/keycloak-32144-support-for-content-security-policy-header.json b/solutions/cncf-generated/keycloak/keycloak-32144-support-for-content-security-policy-header.json new file mode 100644 index 00000000..bcbe915e --- /dev/null +++ b/solutions/cncf-generated/keycloak/keycloak-32144-support-for-content-security-policy-header.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:23.922Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keycloak: Support for Content Security Policy Header", + "description": "Extends the CSP builder to support script-src and style-src, extends the Security Headers Provider to permit the injection of script-src and style-src directives, and implements hash generation for the on-the-fly JavaScript used for browser history and OIDC form redirect.\n\nAdds script-src and style-src nonce support to Freemarker login templates with new NonceBean.\n\nCloses #32123", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Very much looking forward to this one, appreciate your efforts!", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "keycloak", + "incubating", + "security", + "team-ui", + "team-core-shared" + ], + 
"category": "security", + "cncfProjects": [ + "keycloak" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/keycloak/keycloak/pull/32144", + "sourceRepo": "keycloak/keycloak", + "reactions": 12, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:44:23.922Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keycloak/keycloak-33046-provide-missing-user-event-metrics-from-aerogear-keycloak-metrics.json b/solutions/cncf-generated/keycloak/keycloak-33046-provide-missing-user-event-metrics-from-aerogear-keycloak-metrics.json new file mode 100644 index 00000000..f710e056 --- /dev/null +++ b/solutions/cncf-generated/keycloak/keycloak-33046-provide-missing-user-event-metrics-from-aerogear-keycloak-metrics.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:33.135Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keycloak: Provide missing user event metrics from aerogear/keycloak-metrics-spi with a key…", + "description": "…cloak mircometer event listener\n\ninspired by\nhttps://github.com/aerogear/keycloak-metrics-spi\nhttps://github.com/please-openit/keycloak-native-metrics https://github.com/thomasdarimont/keycloak-project-example/tree/main/keycloak/extensions/src/main/java/com/github/thomasdarimont/keycloak/custom/metrics\n\nPreview of the docs: \nhttps://github.com/bohmber/keycloak/blob/issue-33043/docs/guides/server/event-metrics.adoc\n\nfixes: #33043", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@bohmber - thank you for this PR. Please provide an example output of the metrics you created. 
\n\nFor the prefix, I'd go with `keycloak.`", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "java.lang.AssertionError: Expected RegisterPage but was Sign in to test (https://localhost:8543/auth/realms/test/protocol/openid-connect/auth?response_type=code&client_id=test-app&redirect_uri=https%3A%2F%2Flocalhost%3A8543%2Fauth%2Frealms%2Fmaster%2Fapp%2Fauth&state=f892fe75-87a3-4861-8834-4585a812b5db&scope=openid)\n\tat org.junit.Assert.fail(Assert.java:89)\n\tat org.junit.Assert.assertTrue(Assert.java:42)\n\tat org.keycloak.testsuite.pages.AbstractPage.assertCurrent(AbstractPage.java:47)\n\tat java.base/jdk.internal.reflect.DirectMethodHandleAccessor.invoke(DirectMethodHandleAccessor.java:103)\n...", + "java.lang.AssertionError: Expected RegisterPage but was Sign in to test (https://localhost:8543/auth/realms/test/protocol/openid-connect/auth?response_type=code&client_id=test-app&redirect_uri=https%3A%2F%2Flocalhost%3A8543%2Fauth%2Frealms%2Fmaster%2Fapp%2Fauth&state=3ef29f94-72df-44b9-94e0-d7bcc7e568f4&scope=openid)\n\tat org.junit.Assert.fail(Assert.java:89)\n\tat org.junit.Assert.assertTrue(Assert.java:42)\n\tat org.keycloak.testsuite.pages.AbstractPage.assertCurrent(AbstractPage.java:47)\n\tat java.base/jdk.internal.reflect.DirectMethodHandleAccessor.invoke(DirectMethodHandleAccessor.java:103)\n...", + "java.lang.AssertionError: Expected RegisterPage but was Sign in to test (https://localhost:8543/auth/realms/test/protocol/openid-connect/auth?response_type=code&client_id=test-app&redirect_uri=https%3A%2F%2Flocalhost%3A8543%2Fauth%2Frealms%2Fmaster%2Fapp%2Fauth&state=63187ca7-8d4c-4901-ab1b-f31b287b839d&scope=openid)\n\tat org.junit.Assert.fail(Assert.java:89)\n\tat org.junit.Assert.assertTrue(Assert.java:42)\n\tat org.keycloak.testsuite.pages.AbstractPage.assertCurrent(AbstractPage.java:47)\n\tat 
java.base/jdk.internal.reflect.DirectMethodHandleAccessor.invoke(DirectMethodHandleAccessor.java:103)\n..." + ] + } + }, + "metadata": { + "tags": [ + "keycloak", + "incubating", + "security", + "team-cloud-native", + "flaky-test", + "team-core-clients", + "team-core-iam" + ], + "category": "security", + "cncfProjects": [ + "keycloak" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/keycloak/keycloak/pull/33046", + "sourceRepo": "keycloak/keycloak", + "reactions": 8, + "comments": 52 + }, + "security": { + "scannedAt": "2026-02-27T17:44:33.135Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keycloak/keycloak-35075-add-config-param-disabletypeclaimcheck-in-order-to-validate-exter.json b/solutions/cncf-generated/keycloak/keycloak-35075-add-config-param-disabletypeclaimcheck-in-order-to-validate-exter.json new file mode 100644 index 00000000..d4466d0f --- /dev/null +++ b/solutions/cncf-generated/keycloak/keycloak-35075-add-config-param-disabletypeclaimcheck-in-order-to-validate-exter.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:15.000Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keycloak: Add config param disableTypeClaimCheck in order to validate external …", + "description": "…tokens without typ claim\n\nCloses #33332", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Closes #33332", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "keycloak", + "incubating", + "security", + "team-cloud-native", + "team-ui", + "team-core-clients", + "team-core-iam" + ], + "category": "security", + "cncfProjects": [ + "keycloak" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": 
"https://github.com/keycloak/keycloak/pull/35075", + "sourceRepo": "keycloak/keycloak", + "reactions": 17, + "comments": 5 + }, + "security": { + "scannedAt": "2026-02-27T17:44:15.000Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keycloak/keycloak-36665-sending-mails-via-smtp-and-xoauth2-authentication-mechanism.json b/solutions/cncf-generated/keycloak/keycloak-36665-sending-mails-via-smtp-and-xoauth2-authentication-mechanism.json new file mode 100644 index 00000000..51027d14 --- /dev/null +++ b/solutions/cncf-generated/keycloak/keycloak-36665-sending-mails-via-smtp-and-xoauth2-authentication-mechanism.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:13.330Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keycloak: Sending Mails via SMTP and XOAUTH2 authentication mechanism", + "description": "Closes #17432\n\nFirst draft, gathering tokens from additional settings, make existing tests work.\n\nFind a way for testing looks complicated after some hours.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This PR is IMHO already in a good shape: The functionality looks good, there are tests, the tests are green (the one failure is the external link check that is unrelated). \n\nAfter talking to @srose, may someone from the @keycloak/core-iam teams have a look? 
If this should be a @keycloak/core-clients issue instead, please let me know.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "keycloak", + "incubating", + "security", + "team-cloud-native", + "team-ui", + "team-core-clients", + "team-core-iam" + ], + "category": "security", + "cncfProjects": [ + "keycloak" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/keycloak/keycloak/pull/36665", + "sourceRepo": "keycloak/keycloak", + "reactions": 25, + "comments": 20 + }, + "security": { + "scannedAt": "2026-02-27T17:44:13.330Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keycloak/keycloak-38781-integrate-current-auth-username-password-form-authenticator-with-.json b/solutions/cncf-generated/keycloak/keycloak-38781-integrate-current-auth-username-password-form-authenticator-with-.json new file mode 100644 index 00000000..616c07c9 --- /dev/null +++ b/solutions/cncf-generated/keycloak/keycloak-38781-integrate-current-auth-username-password-form-authenticator-with-.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:34.353Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keycloak: Integrate current auth-username-password-form authenticator with passkeys isConditionalMediationAvailable", + "description": "Closes #29596\n\nDraft PR for now that adds a new authenticator that extends the normal username password form authenticator with the `isConditionalMediationAvailable` login for passkeys. 
The main ideas are the following:\n\n- New method to return optional categories in authenticators.\n- The autocomplete attribute with the webauthn is added to the username input.\n- The user can use the passkey or common username/password variant.\n- If the passkey fails an error is presented and the webauthn option is removed (so if it fails, it goes back to only username/password). We can change this but for automated testing is complicated, selenium automatically uses the passkey generating a loop. I preferred to this way. We can also add the restart if needed.\n- The authenticator is new, so in the flow the normal username/password authenticator should be replaced by this new `Passkeys Username Password Form`.\n- Probably we need to continue providing variants for other authenticators (for example the sing", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Rebased to current main.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "error=\"not_allowed\", web_authn_authentication_error=\"webauthn-error-api-get\", web_authn_authentication_error_detail=\"WebAuthn is not supported by this browser. 
Try another one or contact your administrator.\"" + ] + } + }, + "metadata": { + "tags": [ + "keycloak", + "incubating", + "security", + "team-cloud-native", + "team-ui", + "flaky-test", + "team-core-clients", + "team-core-iam" + ], + "category": "security", + "cncfProjects": [ + "keycloak" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/keycloak/keycloak/pull/38781", + "sourceRepo": "keycloak/keycloak", + "reactions": 8, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:44:34.353Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keycloak/keycloak-40508-make-organization-domains-optional.json b/solutions/cncf-generated/keycloak/keycloak-40508-make-organization-domains-optional.json new file mode 100644 index 00000000..485a9b43 --- /dev/null +++ b/solutions/cncf-generated/keycloak/keycloak-40508-make-organization-domains-optional.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:22.531Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keycloak: Make organization `domains` optional", + "description": "Closes #31285", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "As with other pull requests: Please do not rebase a PR unless a reviewer asks you to rebase the PR, or GitHub reports conflicts. Every rebase will trigger unnecessary CI runs and notifications to maintainers. 
The PR will be rebased on merge anyway, so please ignore any GitHub notifications that the branch is outdated.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "java.lang.RuntimeException: java.lang.IllegalStateException: Keycloak unexpectedly died :(\n\tat org.keycloak.testsuite.arquillian.containers.KeycloakQuarkusServerDeployableContainer.start(KeycloakQuarkusServerDeployableContainer.java:71)\n\tat org.jboss.arquillian.container.impl.ContainerImpl.start(ContainerImpl.java:185)\n\tat org.jboss.arquillian.container.impl.client.container.ContainerLifecycleController$8.perform(ContainerLifecycleController.java:137)\n\tat org.jboss.arquillian.container.impl.client.container.ContainerLifecycleController$8.perform(ContainerLifecycleController.java:133)\n..." + ] + } + }, + "metadata": { + "tags": [ + "keycloak", + "incubating", + "security", + "team-cloud-native", + "team-ui", + "flaky-test", + "team-core-clients", + "team-core-iam" + ], + "category": "security", + "cncfProjects": [ + "keycloak" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/keycloak/keycloak/pull/40508", + "sourceRepo": "keycloak/keycloak", + "reactions": 13, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:44:22.531Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keycloak/keycloak-40526-manage-organization-invites.json b/solutions/cncf-generated/keycloak/keycloak-40526-manage-organization-invites.json new file mode 100644 index 00000000..7616874a --- /dev/null +++ b/solutions/cncf-generated/keycloak/keycloak-40526-manage-organization-invites.json @@ -0,0 +1,53 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:12.151Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keycloak: Manage 
Organization Invites", + "description": "Adds full organization invitation management to Keycloak. Allows admins to create, view, update, and delete organization invitations through both REST API and admin console.\n\n- Backend: New invitation SPI with JPA persistence, enhanced REST endpoints, and database schema updates\n- Admin UI: New invitation management interface integrated into the organization members section\n- API: Extended admin client with invitation CRUD operations\n\nCloses #38809", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@SferaDev - I see you are regularly rebasing this PR. There is no need for that except when there are merge conflicts or a reviewer asks you to rebase. Otherwise every rebases uses a lot of CI minutes, and sends out notifications to those that are code owners. \n\nI hope the Keycloak core-clients team and @pedroigor eventually finds time for this PR.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "java.lang.RuntimeException: java.lang.IllegalStateException: Keycloak unexpectedly died :(\n\tat org.keycloak.testsuite.arquillian.containers.KeycloakQuarkusServerDeployableContainer.start(KeycloakQuarkusServerDeployableContainer.java:71)\n\tat org.jboss.arquillian.container.impl.ContainerImpl.start(ContainerImpl.java:185)\n\tat org.jboss.arquillian.container.impl.client.container.ContainerLifecycleController$8.perform(ContainerLifecycleController.java:137)\n\tat org.jboss.arquillian.container.impl.client.container.ContainerLifecycleController$8.perform(ContainerLifecycleController.java:133)\n...", + "java.lang.RuntimeException: java.lang.IllegalStateException: Keycloak unexpectedly died :(\n\tat org.keycloak.testsuite.arquillian.containers.KeycloakQuarkusServerDeployableContainer.start(KeycloakQuarkusServerDeployableContainer.java:71)\n\tat 
org.jboss.arquillian.container.impl.ContainerImpl.start(ContainerImpl.java:185)\n\tat org.jboss.arquillian.container.impl.client.container.ContainerLifecycleController$8.perform(ContainerLifecycleController.java:137)\n\tat org.jboss.arquillian.container.impl.client.container.ContainerLifecycleController$8.perform(ContainerLifecycleController.java:133)\n...", + "org.openqa.selenium.TimeoutException: \njava.net.SocketTimeoutException: Read timed out\nBuild info: version: '4.28.1', revision: '73f5ad48a2'\nSystem info: os.name: 'Linux', os.arch: 'amd64', os.version: '6.11.0-1015-azure', java.version: '21.0.7'\nDriver info: driver.version: HtmlUnitDriver\n..." + ] + } + }, + "metadata": { + "tags": [ + "keycloak", + "incubating", + "security", + "team-cloud-native", + "team-ui", + "flaky-test", + "team-core-clients", + "team-core-iam", + "team-production-readiness" + ], + "category": "security", + "cncfProjects": [ + "keycloak" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/keycloak/keycloak/pull/40526", + "sourceRepo": "keycloak/keycloak", + "reactions": 26, + "comments": 23 + }, + "security": { + "scannedAt": "2026-02-27T17:44:12.151Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keycloak/keycloak-40586-manage-organization-roles.json b/solutions/cncf-generated/keycloak/keycloak-40586-manage-organization-roles.json new file mode 100644 index 00000000..7edeb812 --- /dev/null +++ b/solutions/cncf-generated/keycloak/keycloak-40586-manage-organization-roles.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:17.114Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keycloak: Manage Organization Roles", + "description": "Closes #40585", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@ssilvert I would actually pay 
for this feature to be merged.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "keycloak", + "incubating", + "security" + ], + "category": "security", + "cncfProjects": [ + "keycloak" + ], + "targetResourceKinds": [ + "Role" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/keycloak/keycloak/pull/40586", + "sourceRepo": "keycloak/keycloak", + "reactions": 16, + "comments": 5 + }, + "security": { + "scannedAt": "2026-02-27T17:44:17.114Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keycloak/keycloak-7721-keycloak-14143-turn-oidc-requested-scopes-validation-into-an-spi.json b/solutions/cncf-generated/keycloak/keycloak-7721-keycloak-14143-turn-oidc-requested-scopes-validation-into-an-spi.json new file mode 100644 index 00000000..9bd0e749 --- /dev/null +++ b/solutions/cncf-generated/keycloak/keycloak-7721-keycloak-14143-turn-oidc-requested-scopes-validation-into-an-spi.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:18.140Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keycloak: KEYCLOAK-14143 Turn OIDC requested scopes validation into an SPI", + "description": "Fix #8751\n\nThis change turns OIDC requested scopes validation into an SPI.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "CI says `ClientInvalidationClusterTest` is failing.\n\nHowever, this test is green on my computer when running:\n```\n mvn -f testsuite/integration-arquillian/pom.xml clean install \\\n -Pauth-server-cluster-quarkus \\\n -Dsession.cache.owners=2 \\\n -Dtest=ClientInvalidationClusterTest\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "mvn -f 
testsuite/integration-arquillian/pom.xml clean install \\\r\n -Pauth-server-cluster-quarkus \\\r\n -Dsession.cache.owners=2 \\\r\n -Dtest=ClientInvalidationClusterTest", + "mvn -f testsuite/integration-arquillian/pom.xml clean install -Pauth-server-cluster-quarkus -Dsession.cache.owners=2 -Dtest=**.cluster.**" + ] + } + }, + "metadata": { + "tags": [ + "keycloak", + "incubating", + "security", + "status-needs-discussion" + ], + "category": "security", + "cncfProjects": [ + "keycloak" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/keycloak/keycloak/pull/7721", + "sourceRepo": "keycloak/keycloak", + "reactions": 16, + "comments": 5 + }, + "security": { + "scannedAt": "2026-02-27T17:44:18.140Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keycloak/keycloak-7943-keycloak-6455-ability-to-require-email-to-be-verified-before-chang.json b/solutions/cncf-generated/keycloak/keycloak-7943-keycloak-6455-ability-to-require-email-to-be-verified-before-chang.json new file mode 100644 index 00000000..58749bfc --- /dev/null +++ b/solutions/cncf-generated/keycloak/keycloak-7943-keycloak-6455-ability-to-require-email-to-be-verified-before-chang.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:10.955Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keycloak: KEYCLOAK-6455 Ability to require email to be verified before changing", + "description": "Fix #11875\n\nThis PR adds an UPDATE_EMAIL action (enabled by default) that can be used as an AIA or a required action. The action is associated with a single email input form. If the realm has email verification disabled, this action will allow to update the email without verification. 
If the realm has email verification enabled, the action will send an email update action token to the new email address without changing the account email. Only the action token triggering will complete the email update.\n\nIn the account application personal info (Keycloak V2), this PR turns the email input field into a permanent readonly field. If the UPDATE_EMAIL action is enabled, an \"Update Email\" link will allow to trigger UPDATE_EMAIL action as an AIA. If the UPDATE_EMAIL action is disabled, there will be no link and therefore no way to update the email from the personal info page.\n\nThis PR conditionally removes the email field from `login-update-profile.ftl` form:\n- if the form is opened in a broker", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "> * if the form is opened in a brokered identity context, the email field is kept\n> * otherwise, the email field is removed\n\nWhy should it be removed on a brokered context. It is still ok, to let a mapper copy the email value initially in a brokered identity context (Mapper mode \"import\"). 
Then it could be managed in the account at Keycloak including verification on updates.\nMaking any fields like email, first name or last name non-updateable is a different story and probably can not be solved within this PR.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "org.openqa.selenium.TimeoutException: \r\nExpected condition failed: waiting for wrapped: element to be clickable: GrapheneElement -> GrapheneElement -> [[[[FirefoxDriver: firefox on LINUX (d42f9a40-0b4b-4e40-a5f8-c6271b576168)] -> id: page-sidebar]] -> id: nav-link-personal-info] (tried for 5 second(s) with 500 milliseconds interval)\r\nBuild info: version: '3.14.0', revision: 'aacccce0', time: '2018-08-02T20:19:58.91Z'\r\nSystem info: host: 'paas.psi.redhat.com', ip: '127.0.0.1', os.name: 'Linux', os.arch: 'amd64', os.version: '4.18.0-240.22.1.el8_3.x86_64', java.version: '1.8.0_311'\r\nDriver info: org.openqa.selenium.firefox.FirefoxDriver$$EnhancerByGraphene$$11cadc95\r\nCapabilities {acceptInsecureCerts: true, browserName: firefox, browserVersion: 87.0, javascriptEnabled: true, moz:accessibilityChecks: false, moz:buildID: 20210318103112, moz:geckodriverVersion: 0.30.0, moz:headless: false, moz:processID: 12753, moz:profile: /tmp/rust_mozprofilejSnW8W, moz:shutdownTimeout: 60000, moz:useNonSpecCompliantPointerOrigin: false, moz:webdriverClick: true, pageLoadStrategy: normal, platform: LINUX, platformName: LINUX, platformVersion: 4.18.0-240.22.1.el8_3.x86_64, rotatable: false, setWindowRect: true, strictFileInteractability: false, timeouts: {implicit: 0, pageLoad: 300000, script: 30000}, unhandledPromptBehavior: dismiss and notify}\r\nSession ID: d42f9a40-0b4b-4e40-a5f8-c6271b576168\r\n\tat org.openqa.selenium.support.ui.WebDriverWait.timeoutException(WebDriverWait.java:113)\r\n\tat org.openqa.selenium.support.ui.FluentWait.until(FluentWait.java:283)\r\n\tat 
org.jboss.arquillian.graphene.wait.WebDriverWaitImpl.until(WebDriverWaitImpl.java:96)\r\n\tat org.jboss.arquillian.graphene.wait.WebDriverWaitImpl.commit(WebDriverWaitImpl.java:102)\r\n\tat org.jboss.arquillian.graphene.wait.IsNotElementBuilderImpl.clickable(IsNotElementBuilderImpl.java:62)\r\n\tat org.keycloak.testsuite.util.UIUtils.clickLink(UIUtils.java:75)\r\n\tat org.keycloak.testsuite.ui.account2.page.fragment.Sidebar.lambda$clickNav$0(Sidebar.java:94)\r\n\tat org.keycloak.testsuite.ui.account2.page.fragment.Sidebar.performOperationWithSidebarExpanded(Sidebar.java:71)\r\n\tat org.keycloak.testsuite.ui.account2.page.fragment.Sidebar.clickNav(Sidebar.java:94)\r\n\tat sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\r\n\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\r\n\tat sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\r\n\tat java.lang.reflect.Method.invoke(Method.java:498)\r\n\tat org.jboss.arquillian.graphene.proxy.GrapheneProxyHandler.invokeReal(GrapheneProxyHandler.java:129)\r\n\tat org.jboss.arquillian.graphene.proxy.GrapheneContextualHandler$1.invoke(GrapheneContextualHandler.java:169)\r\n\tat org.jboss.arquillian.graphene.proxy.GrapheneContextualHandler$2.call(GrapheneContextualHandler.java:241)" + ] + } + }, + "metadata": { + "tags": [ + "keycloak", + "incubating", + "security", + "priority-important" + ], + "category": "security", + "cncfProjects": [ + "keycloak" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/keycloak/keycloak/pull/7943", + "sourceRepo": "keycloak/keycloak", + "reactions": 31, + "comments": 56 + }, + "security": { + "scannedAt": "2026-02-27T17:44:10.955Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keycloak/keycloak-8260-keycloak-849-session-limiting-feature.json 
b/solutions/cncf-generated/keycloak/keycloak-8260-keycloak-849-session-limiting-feature.json new file mode 100644 index 00000000..3b392060 --- /dev/null +++ b/solutions/cncf-generated/keycloak/keycloak-8260-keycloak-849-session-limiting-feature.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:21.323Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keycloak: KEYCLOAK-849-session-limiting-feature", + "description": "Under contract of Royal Boom Publishers I've implemented two authenticators which can be used to limit the amount of sessions for a realm, for a client or for users per client.\nHere is a link to the ticket: https://issues.redhat.com/browse/KEYCLOAK-849", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@mfdewit Are you able to follow-up on the mentioned issues anytime soon?\n\nKeycloak team is interested a lot in this feature, so we consider to take your PR and continue with the remaining work. Please reply here if you are already working on the review comments or if you would like to start soon on them. 
Otherwise we can take-over and continue.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "keycloak", + "incubating", + "security" + ], + "category": "security", + "cncfProjects": [ + "keycloak" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/keycloak/keycloak/pull/8260", + "sourceRepo": "keycloak/keycloak", + "reactions": 14, + "comments": 6 + }, + "security": { + "scannedAt": "2026-02-27T17:44:21.323Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keylime/keylime-1637-change-check-tpm-origin-check-to-a-warning-that-does-not-prevent-re.json b/solutions/cncf-generated/keylime/keylime-1637-change-check-tpm-origin-check-to-a-warning-that-does-not-prevent-re.json new file mode 100644 index 00000000..8253fd3b --- /dev/null +++ b/solutions/cncf-generated/keylime/keylime-1637-change-check-tpm-origin-check-to-a-warning-that-does-not-prevent-re.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:41.122Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keylime: Change check_tpm_origin check to a warning that does not prevent registration", + "description": "Fixes [1612](https://github.com/keylime/keylime/issues/1612)\n\nThis changes the `check_tpm_origin` check in `cert_utils.py` to emit warnings if the SAN or hardware filed in the SAN are missing but still return true, and return false an incorrect HW identifier is provided.\n\nThe possibilities are:\n* SAN missing: warn and pass\n* SAN present but relevant hw field missing: warn and pass\n* SAN present with HW info: pass or fail with error based on hw ID, no warning either way", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "The issue was that the SAN might be 
fully missing in some cases, right?\n\nI still would fail on the case that their is a SAN, but not the TPM OID. I would expect the following behavior:\n- SAN missing: issue warning but passes\n- SAN there: validate as usual", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "keylime", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "keylime" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/keylime/keylime/pull/1637", + "sourceRepo": "keylime/keylime", + "reactions": 0, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:48:41.122Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keylime/keylime-319-update-verifier-rest-api-to-return-error-for-invalid-exclude-list.json b/solutions/cncf-generated/keylime/keylime-319-update-verifier-rest-api-to-return-error-for-invalid-exclude-list.json new file mode 100644 index 00000000..2617476c --- /dev/null +++ b/solutions/cncf-generated/keylime/keylime-319-update-verifier-rest-api-to-return-error-for-invalid-exclude-list.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:39.997Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "keylime: Update verifier REST API to return error for invalid exclude list", + "description": "Fixes #314\n\nExclude list example:\n\n```shell\n[root@keylime keylime-dev]# cat ~/excludes.txt\n/root/keylime-dev/.*\n*\n```\n\nError from the verifier:\n```shell\n2020-06-05 20:40:21.875 - keylime.cloudverifier - WARNING - Invalid regex: nothing to repeat. Exclude list regex is misformatted. 
Please correct the issue and try again.\n```\n\nError from the tenant:\n```shell\n[root@keylime keylime-dev]# python keylime/cmd/tenant.py -v 127.0.0.1 -t 127.0.0.1 -tp 9002 -f /root/excludes.txt -u D432FBB3-D2F1-4A97-9EF7-75BD81C00000 --whitelist /root/whitelist.txt --exclude /root/excludes.txt -c add\nUsing config file /root/keylime-dev/keylime.conf\n2020-06-05 20:40:20.539 - keylime.tenant - WARNING - CAUTION: using default password for private key, please set private_key_pw to a strong password\n2020-06-05 20:40:20.539 - keylime.tenant - INFO - Setting up client TLS in /var/lib/keylime/cv_ca\n2020-06-05 20:40:20.546 - keylime.tenant - INFO - TPM PCR Mask from policy is 0x408000\n2020-06-05 20:40:20.546 - keylime.t", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This is really good thanks @font \n\nIt worked well in testing, but noted one thing we may be able to refine.\n\nIs it doable to just report the regex that failed.\n\nI tested with a large whitelist placing the `*` at the end, and the HTTP response (bottom) and verifier log (bottom) contained the whole whitelist:\n\n![Screenshot from 2020-06-04 11-59-38](https://user-images.githubusercontent.com/7058938/83748985-eeaffd80-a65a-11ea-9ada-bd73accd84aa.png)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "[root@keylime keylime-dev]# cat ~/excludes.txt\r\n/root/keylime-dev/.*\r\n*", + "2020-06-05 20:40:21.875 - keylime.cloudverifier - WARNING - Invalid regex: nothing to repeat. Exclude list regex is misformatted. 
Please correct the issue and try again.", + "[root@keylime keylime-dev]# python keylime/cmd/tenant.py -v 127.0.0.1 -t 127.0.0.1 -tp 9002 -f /root/excludes.txt -u D432FBB3-D2F1-4A97-9EF7-75BD81C00000 --whitelist /root/whitelist.txt --exclude /root/excludes.txt -c add\r\nUsing config file /root/keylime-dev/keylime.conf\r\n2020-06-05 20:40:20.539 - keylime.tenant - WARNING - CAUTION: using default password for private key, please set private_key_pw to a strong password\r\n2020-06-05 20:40:20.539 - keylime.tenant - INFO - Setting up client TLS in /var/lib/keylime/cv_ca\r\n2020-06-05 20:40:20.546 - keylime.tenant - INFO - TPM PCR Mask from policy is 0x408000\r\n2020-06-05 20:40:20.546 - keylime.tenant - INFO - TPM PCR Mask from policy is 0x808000\r\n2020-06-05 20:40:20.671 - keylime.ima - WARNING - No boot_aggregate value found in whitelist, adding an empty one\r\n2020-06-05 20:40:21.876 - keylime.tenant - ERROR - Response code 400: Invalid regex: nothing to repeat. Exclude list regex is misformatted. Please correct the issue and try again.\r\n2020-06-05 20:40:21.876 - keylime.tenant - ERROR - POST command response: 400 Unexpected response from Cloud Verifier: {'code': 400, 'status': 'Invalid regex: nothing to repeat. Exclude list regex is misformatted. 
Please correct the issue and try again.', 'results': {}}" + ] + } + }, + "metadata": { + "tags": [ + "keylime", + "sandbox", + "app-definition", + "released" + ], + "category": "workloads", + "cncfProjects": [ + "keylime" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/keylime/keylime/pull/319", + "sourceRepo": "keylime/keylime", + "reactions": 1, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:48:39.997Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kgateway/kgateway-7810-initial-slow-start-support.json b/solutions/cncf-generated/kgateway/kgateway-7810-initial-slow-start-support.json new file mode 100644 index 00000000..b44356ee --- /dev/null +++ b/solutions/cncf-generated/kgateway/kgateway-7810-initial-slow-start-support.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:44.022Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kgateway: Initial slow start support", + "description": "# Description\n\nAdd support for Envoy slow start mode in the `LeastRequest` and `RoundRobin` load balancers.\n\n# Context\n\nFixes #7807.\n\n# Checklist:\n\n- [x] I included a concise, user-facing changelog (for details, see https://github.com/solo-io/go-utils/tree/master/changelogutils) which references the issue that is resolved.\n- [x] If I updated APIs (our protos) or helm values, I ran `make -B install-go-tools generated-code` to ensure there will be no code diff\n- [x] I followed guidelines laid out in the Gloo Edge [contribution guide](https://docs.solo.io/gloo-edge/latest/contributing/)\n- [x] I opened a draft PR or added the work in progress label if my PR is not ready for review\n- [x] I have performed a self-review of my own code\n- [x] I have commented my code, particularly in hard-to-understand areas\n- [x] I have made 
corresponding changes to the documentation\n- [x] I have added tests that prove my fix is effective or that my feature works\n\nBOT NOTES: \nresolves https://github.com/solo-i", + "type": "analyze", + "status": "completed", + "resolution": { + "summary": "Now that Gloo supports Envoy's slow start mode (https://github.com/solo-io/gloo/pull/7810), this PR ensures that `slowStartConfig` is copied to the generated upstreams.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kgateway", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "kgateway" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kgateway-dev/kgateway/pull/7810", + "sourceRepo": "kgateway-dev/kgateway", + "reactions": 1, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:48:44.022Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kmesh/kmesh-1094-update-hostname-and-record-unknown-services-of-service-metrics.json b/solutions/cncf-generated/kmesh/kmesh-1094-update-hostname-and-record-unknown-services-of-service-metrics.json new file mode 100644 index 00000000..8e993f89 --- /dev/null +++ b/solutions/cncf-generated/kmesh/kmesh-1094-update-hostname-and-record-unknown-services-of-service-metrics.json @@ -0,0 +1,57 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:48.631Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kmesh: update hostname and record unknown services of service metrics", + "description": "**What type of PR is this?**\n\n/kind bug\n\n**What this PR does / why we need it**:\n\n**Which issue(s) this PR fixes**:\nFixes #1089. 
Update `destination_service_name` to service name instead of host.\n\nHere are some issues tha remain to be disccused:\n- When a service is not found when building service metrics, I record the `dstIp` in `destination_service`, I'm not sure if it is appropriate.\n- When recording waypoint metrics, services can not found since the `dstIp` is 15019, but only 15021 and 15008 are defined in waypoint services, so the following logic fails every time. Ztunnel uses actual service as destination service instead of the waypoint service, but seems like kmesh can not achieve that. I'm not sure how to handle this situation.\n```go\nnamespacedhost := \"\"\nif dstWorkload != nil {\n\tfor k, portList := range dstWorkload.Services {\n\t\tfor _, port := range portList.Ports {\n\t\t\tif port.TargetPort == uint32(data.dstPort) {\n\t\t\t\tnamespacedhost = k\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif namespacedhost != \"\"", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "**What type of PR is this?**\n\n/kind enhancement\n**What this PR does / why we need it**:\nAdd kiali addon sample and adjust prometheus_recording_istio sample for a better view of kiali\n**Which issue(s) this PR fixes**:\nFixes #1100\n\n**Special notes for your reviewer**:\nThis PR mainly fixes of the already existed `./samples/addon/prometheus_recoding_istio.yaml` and further provide a `kiali.yaml` for a sample Kiali deploy.\nWe can get a pretty decent sample traffic graph in Kiali with this and #1094 both resolved. 
The final look of the traffic graph will be like this\n\"image\"\n\n**Does this PR introduce a user-facing change?**:", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "- And a small question that why the metric are separated into service metric and workload metric?\r\n\r\n**Special notes for your reviewer**:\r\n\r\n**Does this PR introduce a user-facing change?**:\r\n", + "Seems like when kiali drawing waypoint, it connect the waypoint to the destination service using the `destination_service_name` tag, but if we use the waypoint service as `destination_service_name`, I'm not sure if kiali will work as expected.\r\n\r\n![image](https://github.com/user-attachments/assets/9cbae83f-37c9-4884-b67a-f127e5ac8d2f)\r\n\nCan we add actual destination information in `tcp_probe_info`? Maybe add `actual_daddr` and `actual_dport` in the `bpf_sock_tuple` struct so that we can know if the traffic is redirected? (Although this information is only need in metrics, for now)", + ">When recording waypoint metrics, services can not found since the dstIp is 15019, but only 15021 and 15008 are defined in waypoint services, so the following logic fails every time. Ztunnel uses actual service as destination service instead of the waypoint service, but seems like kmesh can not achieve that. I'm not sure how to handle this situation.\r\n\r\n\r\nNow we support deploying waypoint via kmeshctl, why cannot we expose 15019 in the `gateway` object cc @YaoZengzeng \r\n\n> It is a little hard to review, can you do the cleanup/refact in a separate pr, can keep the critical fix here\r\n\r\nThis pr only contains the critical fix, but I made some refactorings to make the code tidy.\r\n\r\nFor example, I extract the following code to a new method `withDestination` since this logic is duplicated in `buildWorkloadMetric` and `buildServiceMetric`. Maybe I can make some explanation in the community meeting? 
🤔" + ] + } + }, + "metadata": { + "tags": [ + "kmesh", + "sandbox", + "app-definition", + "kind-bug", + "lgtm", + "approved", + "size-xl" + ], + "category": "workloads", + "cncfProjects": [ + "kmesh" + ], + "targetResourceKinds": [ + "Pod", + "Service", + "Job", + "Namespace", + "Node" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/kmesh-net/kmesh/pull/1094", + "sourceRepo": "kmesh-net/kmesh", + "reactions": 1, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:48:48.631Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kmesh/kmesh-1249-tcp-long-connections-metrics.json b/solutions/cncf-generated/kmesh/kmesh-1249-tcp-long-connections-metrics.json new file mode 100644 index 00000000..c2320112 --- /dev/null +++ b/solutions/cncf-generated/kmesh/kmesh-1249-tcp-long-connections-metrics.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:49.737Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kmesh: Tcp long connections metrics", + "description": "**What type of PR is this?**\n\n/kind feature\n\n**What this PR does / why we need it**:\nThe pr introduces new feature of tcp_long_conn metrics\n**Which issue(s) this PR fixes**:\n\nFixes #1211 \n\n**Special notes for your reviewer**:\n\n**Does this PR introduce a user-facing change?**:\nYes\n\n```\nTcp long connections metrics\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "[APPROVALNOTIFIER] This PR is **NOT APPROVED**\n\nThis pull-request has been approved by:\n**Once this PR has been reviewed and has the lgtm label**, please assign nlgwcy for approval. 
For more information see [the Kubernetes Code Review Process](https://git.k8s.io/community/contributors/guide/owners.md#the-code-review-process).\n\nThe full list of commands accepted by this bot can be found [here](https://go.k8s.io/bot-commands?repo=kmesh-net%2Fkmesh).\n\n
\nNeeds approval from an approver in each of these files:\n\n- **[OWNERS](https://github.com/kmesh-net/kmesh/blob/main/OWNERS)**\n\nApprovers can indicate their approval by writing `/approve` in a comment\nApprovers can cancel approval by writing `/approve cancel` in a comment\n
", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Tcp long connections metrics", + "bpf_map.go:60: bpf init failed: bpf Load failed: load program: permission denied:\r\n \t; int sockops_prog(struct bpf_sock_ops *skops)\r\n \t0: (bf) r6 = r1", + "> bpf_map.go:60: bpf init failed: bpf Load failed: load program: permission denied:\r\n> \t; int sockops_prog(struct bpf_sock_ops *skops)\r\n> \t0: (bf) r6 = r1\r\n>" + ] + } + }, + "metadata": { + "tags": [ + "kmesh", + "sandbox", + "app-definition", + "kind-feature", + "do-not-merge-contains-merge-commits", + "size-xxl", + "do-not-merge-work-in-progress" + ], + "category": "workloads", + "cncfProjects": [ + "kmesh" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/kmesh-net/kmesh/pull/1249", + "sourceRepo": "kmesh-net/kmesh", + "reactions": 1, + "comments": 31 + }, + "security": { + "scannedAt": "2026-02-27T17:48:49.737Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kmesh/kmesh-1279-pretty-print-bpf-dump.json b/solutions/cncf-generated/kmesh/kmesh-1279-pretty-print-bpf-dump.json new file mode 100644 index 00000000..076e72e8 --- /dev/null +++ b/solutions/cncf-generated/kmesh/kmesh-1279-pretty-print-bpf-dump.json @@ -0,0 +1,54 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:47.208Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kmesh: pretty print bpf dump", + "description": "**What type of PR is this?**\n\n**What this PR does / why we need it**:\n/kind enhancement\n\n**Which issue(s) this PR fixes**:\nFixes #1270. Pretty print the bpf dump by:\n- Print array in one line instead of multiple lines\n- Restore hashed id to the original name. 
`backendUid`, `upstreamUid` e.g.\n- Convert port to host byte order.\n\nAnd there is one issue remains unresolved. I tried to print ip in string format instead of [16]byte, however, I can not distinguish whether the ip is ipv4 or ipv6 since they all stored in [16]byte. Is there any good way to resolve this?\n\nNow the config dump looks like:\n\n```json\n{\n \"workloadPolicies\": [],\n \"backends\": [\n {\n \"ip\": \"10, 244, 1, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\",\n \"serviceCount\": 0,\n \"services\": []\n },\n {\n \"ip\": \"172, 18, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\",\n \"serviceCount\": 0,\n \"services\": []\n },\n {\n \"ip\": \"172, 18, 0, 7, ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "can we format the ip as string", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "can we format the ip as string\n> can we format the ip as string\r\n\r\nI got a question about this, how can we distinguish whether a [16]byte ip is ipv4 or ipv6?\r\n\r\nI tried to use `net.IP(ip[:])` first, but I failed since this uses the following logic to check whether the address is ipv4 and our ipv4 []byte does not follow this rule.", + "@Kuromesi I think we need to check the last 12 bytes = 0", + "The `endpointCount` should be 1\n> @Kuromesi I think we need to check the last 12 bytes = 0\r\n\r\nI agree, I was concerned that this may conflict with ipv6 whose last 12 bytes also equals to 0 at the beginning.\n>" + ] + } + }, + "metadata": { + "tags": [ + "kmesh", + "sandbox", + "app-definition", + "kind-enhancement", + "lgtm", + "approved", + "size-l" + ], + "category": "workloads", + "cncfProjects": [ + "kmesh" + ], + "targetResourceKinds": [ + "Pod", + "Service" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/kmesh-net/kmesh/pull/1279", + "sourceRepo": "kmesh-net/kmesh", + "reactions": 1, + "comments": 10 + }, + "security": { + 
"scannedAt": "2026-02-27T17:48:47.208Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kmesh/kmesh-1417-feat-add-markdownlint-for-md-documents.json b/solutions/cncf-generated/kmesh/kmesh-1417-feat-add-markdownlint-for-md-documents.json new file mode 100644 index 00000000..b98a5caf --- /dev/null +++ b/solutions/cncf-generated/kmesh/kmesh-1417-feat-add-markdownlint-for-md-documents.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:50.687Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kmesh: feat: add markdownlint for md documents", + "description": "**What type of PR is this?**\n\n/kind enhancement\n\n**What this PR does / why we need it**:\n\nuse markdownlint(docker version) to lint markdown documents. \n\n> Currently there are many problems reported by markdownlint, so overwriting is disabled.\n\n**Which issue(s) this PR fixes**:\nFixes #1331\n\n**Special notes for your reviewer**:\n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\nNONE\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Welcome @Flying-Tom! 
It looks like this is your first PR to kmesh-net/kmesh 🎉", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kmesh", + "sandbox", + "app-definition", + "kind-enhancement", + "lgtm", + "approved", + "size-xxl" + ], + "category": "workloads", + "cncfProjects": [ + "kmesh" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kmesh-net/kmesh/pull/1417", + "sourceRepo": "kmesh-net/kmesh", + "reactions": 0, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:48:50.687Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/knative/knative-11250-domainmapping-bring-your-own-certificate.json b/solutions/cncf-generated/knative/knative-11250-domainmapping-bring-your-own-certificate.json new file mode 100644 index 00000000..7b80fc29 --- /dev/null +++ b/solutions/cncf-generated/knative/knative-11250-domainmapping-bring-your-own-certificate.json @@ -0,0 +1,54 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:47.509Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "knative: Domainmapping bring your own certificate", + "description": "Allow domainmappings to specify the tls secret to be used by the autoTLS\ncertificate\n\nThese changes assume that autoTLS is enabled in the cluster and that if the secret doesn't exist in the cluster the intent\nis to create a new secret with the given name\n\nFixes #10530\n\n**Release Note**\n\n```release-note\nDomainmapping can now specify a tls secret to be used as the https certificate\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/retest", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "# 
[Codecov](https://codecov.io/gh/knative/serving/pull/11250?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) Report\n> Merging [#11250](https://codecov.io/gh/knative/serving/pull/11250?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) (7461340) into [main](https://codecov.io/gh/knative/serving/commit/36766635487c8ea80a79c7891b858f8fbc0c3ec3?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) (3676663) will **increase** coverage by `0.01%`.\n> The diff coverage is `80.00%`.\n\n[![Impacted file tree graph](https://codecov.io/gh/knative/serving/pull/11250/graphs/tree.svg?width=650&height=150&src=pr&token=MPpDfAj4Ui&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative)](https://codecov.io/gh/knative/serving/pull/11250?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative)" + ] + } + }, + "metadata": { + "tags": [ + "knative", + "graduated", + "app-definition", + "area-api", + "size-l", + "lgtm", + "area-networking", + "approved", + "area-test-and-release", + "cla--yes" + ], + "category": "workloads", + "cncfProjects": [ + "knative" + ], + "targetResourceKinds": [ + "Secret" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/knative/serving/pull/11250", + "sourceRepo": "knative/serving", + "reactions": 6, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:43:47.509Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/knative/knative-11322-use-tls-for-websocket-and-grpc-tests.json b/solutions/cncf-generated/knative/knative-11322-use-tls-for-websocket-and-grpc-tests.json new file mode 100644 index 00000000..9d4cdfd1 --- /dev/null +++ 
b/solutions/cncf-generated/knative/knative-11322-use-tls-for-websocket-and-grpc-tests.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:58.795Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "knative: Use TLS for websocket and gRPC tests", + "description": "Use TLS for websocket and gRPC tests\n\nCurrent gRPC and Websocket does not use TLS, but still uses HTTP even\nwhen it runs with HTTPS mode.\n\nThis patch fixes it.\n\nFIx https://github.com/knative/serving/issues/11386", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/test pull-knative-serving-https", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "| [Impacted Files](https://codecov.io/gh/knative/serving/pull/11322?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) | Coverage Δ | |\n|---|---|---|\n| [pkg/reconciler/configuration/configuration.go](https://codecov.io/gh/knative/serving/pull/11322/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative#diff-cGtnL3JlY29uY2lsZXIvY29uZmlndXJhdGlvbi9jb25maWd1cmF0aW9uLmdv) | `84.61% <0.00%> (-1.33%)` | :arrow_down: |\n| [pkg/apis/serving/fieldmask.go](https://codecov.io/gh/knative/serving/pull/11322/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative#diff-cGtnL2FwaXMvc2VydmluZy9maWVsZG1hc2suZ28=) | `94.85% <0.00%> (ø)` | |\n| [pkg/reconciler/gc/reconciler.go](https://codecov.io/gh/knative/serving/pull/11322/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative#diff-cGtnL3JlY29uY2lsZXIvZ2MvcmVjb25jaWxlci5nbw==) | `100.00% <0.00%> (ø)` | |\n| 
[pkg/reconciler/labeler/labeler.go](https://codecov.io/gh/knative/serving/pull/11322/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative#diff-cGtnL3JlY29uY2lsZXIvbGFiZWxlci9sYWJlbGVyLmdv) | `100.00% <0.00%> (ø)` | |\n| [pkg/reconciler/autoscaling/kpa/kpa.go](https://codecov.io/gh/knative/serving/pull/11322/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative#diff-cGtnL3JlY29uY2lsZXIvYXV0b3NjYWxpbmcva3BhL2twYS5nbw==) | `95.12% <0.00%> (+0.08%)` | :arrow_up: |\n| [.../reconciler/serverlessservice/serverlessservice.go](https://codecov.io/gh/knative/serving/pull/11322/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative#diff-cGtnL3JlY29uY2lsZXIvc2VydmVybGVzc3NlcnZpY2Uvc2VydmVybGVzc3NlcnZpY2UuZ28=) | `93.33% <0.00%> (+0.08%)` | :arrow_up: |\n| [pkg/reconciler/domainmapping/reconciler.go](https://codecov.io/gh/knative/serving/pull/11322/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative#diff-cGtnL3JlY29uY2lsZXIvZG9tYWlubWFwcGluZy9yZWNvbmNpbGVyLmdv) | `91.66% <0.00%> (+0.11%)` | :arrow_up: |\n| [pkg/reconciler/service/service.go](https://codecov.io/gh/knative/serving/pull/11322/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative#diff-cGtnL3JlY29uY2lsZXIvc2VydmljZS9zZXJ2aWNlLmdv) | `85.31% <0.00%> (+0.20%)` | :arrow_up: |\n| [pkg/reconciler/route/route.go](https://codecov.io/gh/knative/serving/pull/11322/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative#diff-cGtnL3JlY29uY2lsZXIvcm91dGUvcm91dGUuZ28=) | `76.68% <0.00%> (+0.24%)` | :arrow_up: |\n| 
[pkg/reconciler/revision/revision.go](https://codecov.io/gh/knative/serving/pull/11322/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative#diff-cGtnL3JlY29uY2lsZXIvcmV2aXNpb24vcmV2aXNpb24uZ28=) | `87.87% <0.00%> (+0.37%)` | :arrow_up: |\n| ... and [3 more](https://codecov.io/gh/knative/serving/pull/11322/diff?src=pr&el=tree-more&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) | |\n\n------\n\n[Continue to review full report at Codecov](https://codecov.io/gh/knative/serving/pull/11322?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative).\n> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative)\n> `Δ = absolute (impact)`, `ø = not affected`, `? = missing data`\n> Powered by [Codecov](https://codecov.io/gh/knative/serving/pull/11322?src=pr&el=footer&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative). Last update [620cb44...6f0d9e9](https://codecov.io/gh/knative/serving/pull/11322?src=pr&el=lastupdated&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative).\n\n/test pull-knative-serving-https\n/test pull-knative-serving-https\n/test pull-knative-serving-https\nHmm... race issue.\n/retest\r\n\r\npassed once. One more time.\n/retest\r\n\n/test pull-knative-serving-https\n/test pull-knative-serving-https\n/test pull-knative-serving-https\n/retest\n/test pull-knative-serving-https\n/retest\n/retest\r\n\r\nUmm.. not sure why it did not scale up." 
+ ] + } + }, + "metadata": { + "tags": [ + "knative", + "graduated", + "app-definition", + "size-l", + "lgtm", + "approved", + "area-test-and-release", + "cla--yes" + ], + "category": "workloads", + "cncfProjects": [ + "knative" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/knative/serving/pull/11322", + "sourceRepo": "knative/serving", + "reactions": 2, + "comments": 27 + }, + "security": { + "scannedAt": "2026-02-27T17:43:58.795Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/knative/knative-12715-add-support-for-topology-spread-constraint.json b/solutions/cncf-generated/knative/knative-12715-add-support-for-topology-spread-constraint.json new file mode 100644 index 00000000..f0de1ec4 --- /dev/null +++ b/solutions/cncf-generated/knative/knative-12715-add-support-for-topology-spread-constraint.json @@ -0,0 +1,54 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:49.865Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "knative: Add support for topology spread constraint", + "description": "Fixes https://github.com/knative/serving/issues/12639. \nKnative serving currently does not allow specifying `topologySpreadConstraints` in the pod spec as noted by this issue \n\n**I tested this by locally building knative and applying it to a k8s cluster. Topology spread constraints were able to work after enabling them through the config-feature config map**\n\n## Proposed Changes\n\n*\n*\n*\n\n**Release Note**\n\n```release-note\n\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/ok-to-test", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Thanks for your pull request! 
It looks like this may be your first contribution to a Google open source project. Before we can look at your pull request, you'll need to sign a Contributor License Agreement (CLA).\n\nFor more information, open the [CLA check for this pull request](https://github.com/knative/serving/pull/12715/checks?check_run_id=5484246303).\nWelcome @stevenchen-db! It looks like this is your first PR to knative/serving 🎉\nHi @stevenchen-db. Thanks for your PR.\n\nI'm waiting for a [knative](https://github.com/orgs/knative/people) member to verify that this patch is reasonable to test. If it is, they should reply with `/ok-to-test` on its own line. Until that is done, I will not automatically test new commits in this PR, but the usual testing commands by org members will still work. Regular contributors should [join the org](https://github.com/orgs/knative/people) to skip this step.\n\nOnce the patch is verified, the new status will be reflected by the `ok-to-test` label.\n\nI understand the commands that are listed [here](https://go.k8s.io/bot-commands?repo=knative%2Fserving).\n\n
\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n
\n\n/ok-to-test\r\n\n> Since this PR is changing the PodSpec, I believe you'll need to [update the schemas](https://github.com/knative/serving/blob/main/DEVELOPMENT.md?plain=1#L230-L236) as well\r\n\r\n@psschwei I didn't see other things like node affinity or container runtime in `hack/schemapatch-config.yaml`. Is there any reason why topologySpreadConstraints need to be in this file?\n> Is there any reason why topologySpreadConstraints need to be in this file?\r\n\r\nSince you're behind a feature flag, [it doesn't](https://github.com/knative/serving/blob/main/DEVELOPMENT.md?plain=1#L235-L236). And since `preserveUnknownFields` is already set to true, doesn't look like there's anything needed on that front for this one (sorry about that, it's been a while since I looked at the schema job...)\r\n\n# [Codecov](https://codecov.io/gh/knative/serving/pull/12715?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) Report\n> Merging [#12715](https://codecov.io/gh/knative/serving/pull/12715?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) (04cebd0) into [main](https://codecov.io/gh/knative/serving/commit/0753bb1c55bd58d26b71f33db9776d78d92bff30?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) (0753bb1) will **increase** coverage by `0.00%`.\n> The diff coverage is `100.00%`." 
+ ] + } + }, + "metadata": { + "tags": [ + "knative", + "graduated", + "app-definition", + "area-api", + "size-l", + "lgtm", + "area-networking", + "approved", + "area-test-and-release", + "ok-to-test" + ], + "category": "workloads", + "cncfProjects": [ + "knative" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/knative/serving/pull/12715", + "sourceRepo": "knative/serving", + "reactions": 3, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:43:49.865Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/knative/knative-13969-dataplane-trust-adding-mtls-and-tls-to-activator.json b/solutions/cncf-generated/knative/knative-13969-dataplane-trust-adding-mtls-and-tls-to-activator.json new file mode 100644 index 00000000..e66c5ac2 --- /dev/null +++ b/solutions/cncf-generated/knative/knative-13969-dataplane-trust-adding-mtls-and-tls-to-activator.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:51.494Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "knative: Dataplane-trust Adding mTLS and TLS To Activator", + "description": "Fixes #13968\n\nThis PR implements dataplane-trust options of the Activator. \n\nCode from `knative/pkg/network/transports.go` was modified and moved here to `activator/handler/transport.go`. 
\nThe code at `activator/handler/transport.go` depends on `activator/handler/context.go` and therefore cannot be moved back to `knative/pkg/network/transports.go`\n\nLeaving Queue changes to a separate PR\n\n**Release Note**\n\n```release-note\nWe added alpha support for dataplane-trust network options of Activator including TLS or mTLS and an appropriate set of certificates to implement trust between Activator and Queue.\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Ensure that if someone uses the deprecated InternalEncryption, we use Dataplane-trust=Minimal as a minimum. \nInternalEncryption is deprecated but will take time to remove from dependencies", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Ensure that if someone uses the deprecated InternalEncryption, we use Dataplane-trust=Minimal as a minimum. \r\nInternalEncryption is deprecated but will take time to remove from dependencies\r\n\n@evankanderson @dprotaso @psschwei @ReToCode @nak3 \n> 5. Modifying function signatures will require changes to knative/networking...\r\n\r\nSorry if I misunderstand but do you mean `knative/pkg`'s networking and the change will affect a large area out side of this dataplane-trust change?\r\n\r\nIf the change will not affect a large area or at least we can manage it, I think it is OK to move forward this PR. I would like to wait for other reviewers, though.\n> > 5. 
Modifying function signatures will require changes to knative/networking...\r\n> \r\n> Sorry if I misunderstand but do you mean `knative/pkg`'s networking and the change will affect a large area out side of this dataplane-trust change?\r\n\r\nYes, you are correct, I am referring to `knative/pkg`'s networking.\r\n\r\nWe have a couple of options: one is to have a PR that detach from the parts of `knative/pkg`'s networking that need to change and later embed them into `knative/pkg`'s networking. The other is to make changes in `knative/pkg`'s networking first, then continue with this PR. \r\n\r\n\r\n> \r\n> If the change will not affect a large area or at least we can manage it, I think it is OK to move forward this PR. I would like to wait for other reviewers, though.\r\n\r\nSince *http.Transport offers http.RoundTripper interface, it seems e can do the changes in `knative/pkg`'s networking without changing the contract with the callers - \r\n\r\nChanging `func NewProxyAutoTLSTransport(maxIdle, maxIdlePerHost int, tlsConf *tls.Config) http.RoundTripper` to `func NewProxyAutoTLSTransport(maxIdle, maxIdlePerHost int, tlsConf *tls.Config) *http.Transport` for example, seem to not change the contract for the caller?\r\n\r\nThe other option is to add new exportables to `knative/pkg`'s networking with the new signature.\r\n\r\n \n> Changing func NewProxyAutoTLSTransport(maxIdle, maxIdlePerHost int, tlsConf *tls.Config) http.RoundTripper to func NewProxyAutoTLSTransport(maxIdle, maxIdlePerHost int, tlsConf *tls.Config) *http.Transport for example, seem to not change the contract for the caller?\r\n\r\nActually that's what I wanted to confirm :smile: If it does not change the contract for caller, I think the change does not affect so much and it should be alright.\n~~A bigger issue we may have is with testing - we have tests that rely on the interface - they use the handler.New() function. 
and provide it with an `http.RoundTripper` interface that is implemented as part of the test code.~~\r\n\r\n~~If we now move to use `*http.transport`, these tests will not work and we will need to rethink how to do them (Any thoughts there?)~~\r\n\r\nThis was later reverted\r\n\n## [Codecov](https://app.codecov.io/gh/knative/serving/pull/13969?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) Report\nPatch coverage: **`28.94`**% and project coverage change: **`-0.33`** :warning:\n> Comparison is base [(`5a90438`)](https://app.codecov.io/gh/knative/serving/commit/5a90438bbd9d7a677310c238b388d6f9a82ffe5c?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) 86.22% compared to head [(`01804ac`)](https://app.codecov.io/gh/knative/serving/pull/13969?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) 85.90%.\n\n> :exclamation: Current head 01804ac differs from pull request most recent head 64683a7. Consider uploading reports for the commit 64683a7 to get more accurate results\n\n
Additional details and impacted files" + ] + } + }, + "metadata": { + "tags": [ + "knative", + "graduated", + "app-definition", + "area-api", + "size-xl", + "area-networking", + "area-autoscale" + ], + "category": "workloads", + "cncfProjects": [ + "knative" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/knative/serving/pull/13969", + "sourceRepo": "knative/serving", + "reactions": 2, + "comments": 22 + }, + "security": { + "scannedAt": "2026-02-27T17:43:51.494Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/knative/knative-14568-support-http1-full-duplex-per-workload.json b/solutions/cncf-generated/knative/knative-14568-support-http1-full-duplex-per-workload.json new file mode 100644 index 00000000..1c1b89ee --- /dev/null +++ b/solutions/cncf-generated/knative/knative-14568-support-http1-full-duplex-per-workload.json @@ -0,0 +1,53 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:57.907Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "knative: Support http1 full duplex per workload", + "description": "Fixes #12387 \n\n## Proposed Changes\n\n* Workloads annotated with `features.knative.dev/http-full-duplex=Enabled` will get the support for http1 full duplex end-to-end on the data path. Activator and QP h2c handlers dont seem to create an issue on the http1 case.\n* This feature requires a build with golang 1.21.x.\n* Handlers that override the writer methods need to be controlled with the new Golang fix. 
Check [this](https://github.com/skonto/test-reverse-proxy/blob/main/pkg/rp/rev_test.go#L164) for more.\nNote: If any of the wrappers is removed then you get ~1 request failure over tens of thousands of requests.\nHad to run the [test](https://github.com/skonto/test-reverse-proxy/tree/main#test-with-knative-serving) multiple times to get one failure, each run creates ~30K requests over a period ~1m. \n* Tested with [this repo](https://github.com/skonto/test-reverse-proxy/tree/main), [sample run](https://gist.github.com/skonto/0e4d61da67bf6a84ee60294f7fe5bc72). Might have to add an integration", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "# Changes\n\n- Continues https://github.com/knative/actions/pull/167\n- Follows guidelines here https://github.com/github/codeql/issues/13992\n- Need by this PR https://github.com/knative/serving/pull/14568, see this [run](https://github.com/knative/serving/actions/runs/6653489641/job/18079562592?pr=14568).", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "\r\n\r\n\r\n\r\n# Changes\r\n\r\n\r\n\r\n- Continues https://github.com/knative/actions/pull/167\r\n- Follows guidelines here https://github.com/github/codeql/issues/13992\r\n- Need by this PR https://github.com/knative/serving/pull/14568, see this [run](https://github.com/knative/serving/actions/runs/6653489641/job/18079562592?pr=14568).\r\n\n## [Codecov](https://app.codecov.io/gh/knative/serving/pull/14568?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) Report\nAttention: `25 lines` in your changes are missing coverage. 
Please review.\n> Comparison is base [(`e5602d7`)](https://app.codecov.io/gh/knative/serving/commit/e5602d72da89199790eaeefa7816e422758d656d?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) 86.02% compared to head [(`558ae15`)](https://app.codecov.io/gh/knative/serving/pull/14568?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) 85.92%.\n> Report is 12 commits behind head on main.\n\n| [Files](https://app.codecov.io/gh/knative/serving/pull/14568?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) | Patch % | Lines |\n|---|---|---|\n| [pkg/queue/sharedmain/handlers.go](https://app.codecov.io/gh/knative/serving/pull/14568?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative#diff-cGtnL3F1ZXVlL3NoYXJlZG1haW4vaGFuZGxlcnMuZ28=) | 0.00% | [12 Missing :warning: ](https://app.codecov.io/gh/knative/serving/pull/14568?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) |\n| [pkg/activator/handler/context.go](https://app.codecov.io/gh/knative/serving/pull/14568?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative#diff-cGtnL2FjdGl2YXRvci9oYW5kbGVyL2NvbnRleHQuZ28=) | 60.00% | [3 Missing and 1 partial :warning: ](https://app.codecov.io/gh/knative/serving/pull/14568?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) |\n| [pkg/activator/handler/handler.go](https://app.codecov.io/gh/knative/serving/pull/14568?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative#diff-cGtnL2FjdGl2YXRvci9oYW5kbGVyL2hhbmRsZXIuZ28=) | 66.66% | [2 Missing and 1 partial :warning: 
](https://app.codecov.io/gh/knative/serving/pull/14568?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) |\n| [cmd/activator/main.go](https://app.codecov.io/gh/knative/serving/pull/14568?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative#diff-Y21kL2FjdGl2YXRvci9tYWluLmdv) | 0.00% | [2 Missing :warning: ](https://app.codecov.io/gh/knative/serving/pull/14568?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) |\n| [pkg/http/handler/timeout.go](https://app.codecov.io/gh/knative/serving/pull/14568?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative#diff-cGtnL2h0dHAvaGFuZGxlci90aW1lb3V0Lmdv) | 0.00% | [2 Missing :warning: ](https://app.codecov.io/gh/knative/serving/pull/14568?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) |\n| [pkg/http/response\\_recorder.go](https://app.codecov.io/gh/knative/serving/pull/14568?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative#diff-cGtnL2h0dHAvcmVzcG9uc2VfcmVjb3JkZXIuZ28=) | 0.00% | [2 Missing :warning: ](https://app.codecov.io/gh/knative/serving/pull/14568?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) |\n\n
Additional details and impacted files", + "
\n\n[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/knative/serving/pull/14568?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative). \n:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative).\n\n/test performance-tests\n/test performance-tests\n/test istio-latest-no-mesh-tls\nSec failure:", + "It seems that we need to [setup go version with a separate step for CodeQL](https://github.com/github/codeql-go/issues/351). \ncc @dprotaso gentle ping\nCan you also update the go.mod file to ensure folks are building with `go1.21` \r\n\r\nOtherwise this feature won't work.\n> Can you also update the go.mod file to ensure folks are building with go1.21\r\n\r\nOk, I will update this on a separate PR. In general so far we have been building with a version later than the minimum in go.mod for different reasons like CVEs etc, so I would expect people not to use 1.18 (not maintained) but I agree let's make it easier. \n@dprotaso @ReToCode I updated the PR to address the comments, pls review.\nsorry took a bit, so you need to rebase again. 
lgtm.\r\n\r\nWDYT @dprotaso ?\r\n\n@dprotaso gentle ping.\n@dprotaso gentle ping :pray: \n@dprotaso ready, gentle ping.\n@dprotaso gentle ping.\n@dprotaso gentle ping.\nLooks good - I tried to run the test locally and it seemed to hang\r\n\r\n\r\n`go test -v -run TestActivatorChain`\r\n> 2024/01/12 13:48:59 http: proxy error: dial tcp 127.0.0.1:58796: connect: can't assign requested address\r\n> 2024/01/12 13:48:59 http: proxy error: dial tcp 127.0.0.1:58796: connect: can't assign requested address\r\n> main_test.go:253: error during request: unexpected status code: 502\r\n> main_test.go:253: error during request: failed to execute request: Post \"http://127.0.0.1:58798\": dial tcp 127.0.0.1:58798: connect: can't assign requested address\r\n> main_test.go:253: error during request: failed to execute request: Post \"http://127.0.0.1:58798\": dial tcp 127.0.0.1:58798: connect: can't assign requested address\r\n> main_test.go:253: error during request: failed to execute request: Post \"http://127.0.0.1:58798\": dial tcp 127.0.0.1:58798: connect: can't assign requested address\r\n> main_test.go:253: error during request: unexpected status code: 502\r\n> main_test.go:253: error during request: failed to execute request: Post \"http://127.0.0.1:58798\": dial tcp 127.0.0.1:58798: connect: can't assign requested address\r\n> main_test.go:253: error during request: failed to execute request: Post \"http://127.0.0.1:58798\": dial tcp 127.0.0.1:58798: connect: can't assign requested address\r\n> main_test.go:253: error during request: failed to execute request: Post \"http://127.0.0.1:58798\": dial tcp 127.0.0.1:58798: connect: can't assign requested address\r\n> main_test.go:253: error during request: failed to execute request: Post \"http://127.0.0.1:58798\": dial tcp 127.0.0.1:58798: connect: can't assign requested address\r\n> main_test.go:253: error during request: failed to execute request: Post \"http://127.0.0.1:58798\": dial tcp 127.0.0.1:58798: connect: can't assign 
requested address\r\n> main_test.go:253: error during request: unexpected status code: 502\r\n> main_test.go:253: error during request: failed to execute request: Post \"http://127.0.0.1:58798\": dial tcp 127.0.0.1:58798: connect: can't assign requested address\r\n> main_test.go:253: error during request: unexpected status code: 502\r\n> main_test.go:253: error during request: failed to execute request: Post \"http://127.0.0.1:58798\": dial tcp 127.0.0.1:58798: connect: can't assign requested address\r\n\r\nI'm on Mac ARM - go version 1.21.6\r\n\n@dprotaso hi,\r\n\r\n> I'm on Mac ARM - go version 1.21.6\r\n\r\nI don't use Mac but maybe @ReToCode can help. This seems like a known [MAC network setup error](https://stackoverflow.com/questions/54276809/how-to-fix-cant-assign-requested-address-even-i-tried-many-different-port-on), check also [here](https://support.ovpn.com/hc/en-us/articles/18188372203924-Why-can-t-I-assign-a-requested-address-code-49-) for a fix.\r\n\r\nAt my side tests pass (go 1.21.6):" + ] + } + }, + "metadata": { + "tags": [ + "knative", + "graduated", + "app-definition", + "area-api", + "size-l", + "lgtm", + "area-networking", + "area-autoscale", + "approved" + ], + "category": "workloads", + "cncfProjects": [ + "knative" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/knative/serving/pull/14568", + "sourceRepo": "knative/serving", + "reactions": 2, + "comments": 22 + }, + "security": { + "scannedAt": "2026-02-27T17:43:57.907Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/knative/knative-15066-integrate-net-certmanager-in-serving.json b/solutions/cncf-generated/knative/knative-15066-integrate-net-certmanager-in-serving.json new file mode 100644 index 00000000..384f5ac1 --- /dev/null +++ b/solutions/cncf-generated/knative/knative-15066-integrate-net-certmanager-in-serving.json @@ -0,0 +1,50 @@ +{ + "format": 
"kc-mission-v1", + "exportedAt": "2026-02-27T17:43:56.252Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "knative: Integrate net-certmanager in Serving", + "description": "Fixes https://github.com/knative/serving/issues/14740\n\n## Proposed Changes\n\n* Moves net-certmanager into Serving under pkg/net-certmanager. This brings certmanager deps for creating certificates.\n* Migration path for users should be straightforward just remove the net-certmanager deployment before doing an upgrade (should not create downtime).\n* Enables the netcertmanager controller and the related informers only when it is required.\n\n**Release Note**\n\n```release-note\nThe net-certmanager controller is now part of the Serving core and specifically of the Serving controller.\nTo upgrade from an existing deployment you need to delete the net-certmanager deployment first. \n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Fixes https://github.com/knative/serving/issues/14740\n\n## Proposed Changes\n\n* Moves net-certmanager into Serving under pkg/net-certmanager. This brings certmanager deps for creating certificates.\n* Migration path for users should be straightforward just remove the net-certmanager deployment before doing an upgrade (should not create downtime).\n* Enables the netcertmanager controller and the related informers only when it is required.\n\n**Release Note**\n\n```release-note\nThe net-certmanager controller is now part of the Serving core and specifically of the Serving controller.\nTo upgrade from an existing deployment you need to delete the net-certmanager deployment first. \n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Fixes https://github.com/knative/serving/issues/14740\r\n\r\n\r\n## Proposed Changes\r\n\r\n* Moves net-certmanager into Serving under pkg/net-certmanager. 
This brings certmanager deps for creating certificates.\r\n* Migration path for users should be straightforward just remove the net-certmanager deployment before doing an upgrade (should not create downtime).\r\n* Enables the netcertmanager controller and the related informers only when it is required.\r\n\r\n**Release Note**\r\n\r\n" + ] + } + }, + "metadata": { + "tags": [ + "knative", + "graduated", + "app-definition", + "size-xxl", + "lgtm", + "approved" + ], + "category": "workloads", + "cncfProjects": [ + "knative" + ], + "targetResourceKinds": [ + "Deployment" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/knative/serving/pull/15066", + "sourceRepo": "knative/serving", + "reactions": 2, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:43:56.252Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/knative/knative-15503-ensure-containerhealthy-condition-is-set-back-to-true.json b/solutions/cncf-generated/knative/knative-15503-ensure-containerhealthy-condition-is-set-back-to-true.json new file mode 100644 index 00000000..a54b5250 --- /dev/null +++ b/solutions/cncf-generated/knative/knative-15503-ensure-containerhealthy-condition-is-set-back-to-true.json @@ -0,0 +1,55 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:00.546Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "knative: Ensure ContainerHealthy condition is set back to True", + "description": "Fixes #15487 \n\n## Proposed Changes\n\nThis changes the Revision reconciler to contain a code path that changes the ContainerHealthy condition from False to True as the old code path is not active anymore (see linked issue). 
The criteria that has been chosen is whether the deployment has ready replicas.\n\nI am also changing the pod list in that area to run with limit=1 as only the first pod of the list is looked at anyway.\n\n**Release Note**\n\n```release-note\nA revision is now set to ContainerHealthy=True when it has ready replicas\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Fixes #14157 \n\n## Proposed Changes\n\n* Introduces a new PA condition (`PodAutoscalerConditionScaleTargetScaled`) that detects failures during scaling to zero, covering the K8s gaps where deployment status is not updated correctly. The condition is set to `false` just before we scale down to zero (before the deployment update happens) and if pods are crashing. We set it back to `true` when we scale from zero and we have enough ready pods.\n\n* Previously when deployment was scaled down to zero, revision ready status would be true (and stay that way), but with this patch the pod error is detected and propagated:\n\n```\nKsvc status:\n\n{\n \"lastTransitionTime\": \"2024-10-04T13:57:35Z\",\n \"message\": \"Revision \\\"revision-failure-00001\\\" failed with message: Back-off pulling image \\\"index.docker.io/skonto/revisionfailure@sha256:c7dd34a5919877b89617c3a0df7382e7de0f98318f2c12bf4374bb293f104977\\\".\",\n \"reason\": \"RevisionFailed\",\n \"status\": \"False\",\n \"type\": \"ConfigurationsReady\"\n},\n\nRevision:\n\nk get revision\nNAME CONFIG NAME GENERATION READY REASON ACTUAL REPLICAS DESIRED REPLICAS\nrevision-failure-00001 revision-failure 1 False ImagePullBackOff 0 0\n\nPA status:\n{\n \"lastTransitionTime\": \"2024-10-04T13:57:35Z\",\n \"message\": \"Back-off pulling image \\\"index.docker.io/skonto/revisionfailure@sha256:c7dd34a5919877b89617c3a0df7382e7de0f98318f2c12bf4374bb293f104977\\\"\",\n \"reason\": \"ImagePullBackOff\",\n ", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + 
"codeSnippets": [ + "Fixes #14157 \r\n\r\n\r\n## Proposed Changes\r\n\r\n* Introduces a new PA condition (`PodAutoscalerConditionScaleTargetScaled`) that detects failures during scaling to zero, covering the K8s gaps where deployment status is not updated correctly. The condition is set to `false` just before we scale down to zero (before the deployment update happens) and if pods are crashing. We set it back to `true` when we scale from zero and we have enough ready pods.\r\n\r\n* Previously when deployment was scaled down to zero, revision ready status would be true (and stay that way), but with this patch the pod error is detected and propagated:", + "* Updates the pa status propagation logic in the revision reconciler. \r\n* Extends a bit the resource quota e2e test to show that when deployment is scaled to zero we will still report the error. That is irrelevant to this patch but we want to show that we cover certain scenarios more. Probably it would be good to add more e2e tests anyway.\r\n* The steps to test is simply start a skvc, let it scale to zero then remove the image from your registry, block any access (kill internet) and then issue a request. \nHi @SaschaSchwarze0. Thanks for your PR.\n\nI'm waiting for a [knative](https://github.com/orgs/knative/people) member to verify that this patch is reasonable to test. If it is, they should reply with `/ok-to-test` on its own line. Until that is done, I will not automatically test new commits in this PR, but the usual testing commands by org members will still work. Regular contributors should [join the org](https://github.com/orgs/knative/people) to skip this step.\n\nOnce the patch is verified, the new status will be reflected by the `ok-to-test` label.\n\nI understand the commands that are listed [here](https://go.k8s.io/bot-commands?repo=knative%2Fserving).\n\n
\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes-sigs/prow](https://github.com/kubernetes-sigs/prow/issues/new?title=Prow%20issue:) repository.\n
\n\n/ok-to-test\ncc @dprotaso for review.\n## [Codecov](https://app.codecov.io/gh/knative/serving/pull/15503?dropdown=coverage&src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) Report\nAttention: Patch coverage is `80.64516%` with `6 lines` in your changes missing coverage. Please review.\n> Project coverage is 80.82%. Comparing base [(`df7f168`)](https://app.codecov.io/gh/knative/serving/commit/df7f1681a4e9b5daaab44f1416ea143d12d6ec4d?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) to head [(`8460f72`)](https://app.codecov.io/gh/knative/serving/commit/8460f72283f115088bdba0d3e17767a43e52d706?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative).\n> Report is 15 commits behind head on main.\n\n| [Files with missing lines](https://app.codecov.io/gh/knative/serving/pull/15503?dropdown=coverage&src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) | Patch % | Lines |\n|---|---|---|\n| [pkg/testing/functional.go](https://app.codecov.io/gh/knative/serving/pull/15503?src=pr&el=tree&filepath=pkg%2Ftesting%2Ffunctional.go&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative#diff-cGtnL3Rlc3RpbmcvZnVuY3Rpb25hbC5nbw==) | 62.50% | [5 Missing and 1 partial :warning: ](https://app.codecov.io/gh/knative/serving/pull/15503?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) |\n\n
Additional details and impacted files", + "
\n\n[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/knative/serving/pull/15503?dropdown=coverage&src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative). \n:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative).\n\n@dprotaso gentle ping, any objections on this one? It seems ok to me.\nI updated the PR based on the discussion in https://cloud-native.slack.com/archives/C04LMU0AX60/p1733333864902479.\n@SaschaSchwarze0 Are you able to address the comments and wrap up the PR? We are looking forward with this fix.\n@SaschaSchwarze0 -- would you like some help getting this over the finish line? I can PR or commit to your branch if you won't have time to complete it; this is apparently also causing pain for @houshengbo and @yuzisun.\nI really hope this PR can be approved, since it has been causing issues to our env on weekly basis, during the past months.\r\nIt would be good that it can be patched to 1.15 and 1.16 branches.\n/lgtm\n@dprotaso gentle ping, @houshengbo the goal is to get this in for 1.17 that is coming soon but we can backport as well.\nI am now OK with the PR BTW.\r\nIn terms of how we can decide the revision or service is in good health, it is ok to check all the pods.\r\nWe used to run into situations that some pods are up, but some are not due to the node issues. It is not correct mark the service or revision as healthy, in this situation.\nHey FYI - I'm getting to this PR just trying to unblock dependency updates for the release first. \n@evankanderson @skonto @dprotaso I was reading most of the things that were commented, but did not have the time to make a conclusion out of them so far. Now, reading through all of them, I still think there needs to be a decision on the direction we want to go. 
I think there are two options (and the current PR is a mixture).\r\n\r\n(1) Mark ContainerHealthy true when all pods are healthy. This means one would need to loop through all pods like the PR currently does. And one should set ContainerHealthy to true when spec.replicas > 0 && spec.replicas == .status.readyReplicas (I think I had this at some point, but we changed that). The disadvantage of this is that one needs to retrieve all pods because there is no caching. Retrieving all pods has always been done so it would not be a change.\r\n\r\n(2) Mark ContainerHealthy true when one pod is healthy. This means that the condition to set this can stay at .status.readyReplicas > 0 the way it currently is. We can then recert the logic changes to check all pods but instead only look at 1. We can optimize the List call to run with `.status.phase==Running` (because I do not think we care about Pending pods) and with Limit=1 so that the API only returns one even if there are many.\r\n\r\nMy vote would be (2). ContainerHealthy would then most likely only be set to False for those \"permanent\" errors and not for things like an OOM that happens once per day assuming the KSvc's scale is >1.\nTentatively I would vote for option (2) given current semantics. Here are my thoughts:\r\n\r\nAs a side note, independently of this PR, one thing that we don't consider is the number of min replicas ready for characterizing a revision as healthy or not. For example in a scenario where minScale=2>1 and one pod keeps exiting what I observed is that the revision remains healthy and accepts traffic. However, initially, when we create the revision we will never become ready if we don't achieve the desired minScale. Is that ok? I don't think we have a strictly defined health condition for the revision. \r\nFor example should achieving minScale=N be a strict requirement at all times or should be the value defined by `deployment.Spec.Replicas` (as set by KPA dynamically)? 
If the former was the case then we would need to wait for `deployment.Status.ReadyReplicas = minScale` in this PR to reset `ContainerHealthy`. \r\n\r\nIn any case the scope of the PR is the bug of never setting back the condition to True and I think we should only consider the fix for the bug here given current Knative semantics.\r\n\r\nWe have:" + ] + } + }, + "metadata": { + "tags": [ + "knative", + "graduated", + "app-definition", + "kind-bug", + "size-l", + "lgtm", + "approved", + "ok-to-test" + ], + "category": "workloads", + "cncfProjects": [ + "knative" + ], + "targetResourceKinds": [ + "Pod", + "Deployment" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/knative/serving/pull/15503", + "sourceRepo": "knative/serving", + "reactions": 2, + "comments": 26 + }, + "security": { + "scannedAt": "2026-02-27T17:44:00.546Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/knative/knative-3467-support-for-nodeselector-and-tolerations.json b/solutions/cncf-generated/knative/knative-3467-support-for-nodeselector-and-tolerations.json new file mode 100644 index 00000000..69f7b514 --- /dev/null +++ b/solutions/cncf-generated/knative/knative-3467-support-for-nodeselector-and-tolerations.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:48.932Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "knative: Support for NodeSelector and Tolerations.", + "description": "Fixes #1816\nCloses #1831\n\nTook this over from @krancour to rebase to master and push it through.\n\n## Proposed Changes\n* Support for nodeSelector and tolerations in revision spec\n\n**Release Note**\n\n```release-note\nSupport for nodeSelector and tolerations in revision spec\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Fixes #1816\n\nThought this wouldn't be too 
controversial, so @jeremyrickard and I took a crack at it.\n\n## Proposed Changes\n\n * Support for `nodeSelector` and `tolerations` in revision spec", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "knative", + "graduated", + "app-definition", + "area-api", + "size-l" + ], + "category": "workloads", + "cncfProjects": [ + "knative" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/knative/serving/pull/3467", + "sourceRepo": "knative/serving", + "reactions": 3, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:43:48.932Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/knative/knative-4289-allow-users-to-specify-their-container-name.json b/solutions/cncf-generated/knative/knative-4289-allow-users-to-specify-their-container-name.json new file mode 100644 index 00000000..31002c73 --- /dev/null +++ b/solutions/cncf-generated/knative/knative-4289-allow-users-to-specify-their-container-name.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:01.420Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "knative: Allow users to specify their container name.", + "description": "With this change, users can specify the `name:` of the container they specify\nin their revision spec. This is to ease migration from K8s abstractions of\na similar shape where this field is required, and to lessen the eyesore of the\nyaml returned by the API server, which doesn't `omitempty`.\n\nThe default value for container name is configurable via `config-defaults`, but\ndefaults to `user-container` for consistency with what we have today. 
The\ncontainer name default in `config-defaults` is a Go template, which has access\nto the ObjectMeta of the enclosing resource's ObjectMeta (e.g. Service,\nConfiguration), so if an operator wanted to make the container name match the\nenclosing Service, they may set this to `{{.Name}}`.\n\nThis also pulls the user annotation stuff into v1beta1, which was apparently still TODO.\n\nFixes: https://github.com/knative/serving/issues/4257", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "I had to rework this a bit due to the `deepcopy-gen` problem with `text/template.Template`. I'm also still looking at what I did to send `name:` to the preupgrade test.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "knative", + "graduated", + "app-definition", + "area-api", + "size-xl", + "lgtm", + "approved", + "area-test-and-release", + "cla--yes" + ], + "category": "workloads", + "cncfProjects": [ + "knative" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/knative/serving/pull/4289", + "sourceRepo": "knative/serving", + "reactions": 1, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:44:01.420Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/knative/knative-4346-add-ingress-controller-implementation-and-logic-in-route-controller.json b/solutions/cncf-generated/knative/knative-4346-add-ingress-controller-implementation-and-logic-in-route-controller.json new file mode 100644 index 00000000..de5ff5ba --- /dev/null +++ b/solutions/cncf-generated/knative/knative-4346-add-ingress-controller-implementation-and-logic-in-route-controller.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:52.566Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": 
"auto-generated", + "mission": { + "title": "knative: Add Ingress controller implementation and logic in route controller to reconcile ingresses", + "description": "Add ingress controller for Fixes #3982. This is a part of the overall implementation if namespaced ingress. This PR covers the Ingress controller and its resources. \n\n## Proposed Changes\n\n* Create Ingress controller based on clusteringress\n* Add label keys for Ingress and IngressNamespace\n* ClusterIngress label will be \"/clusteringress\" instead of \"/ingress\"\n\n**Release Note**\n\n```release-note\n\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Hi @wtam2018. Thanks for your PR.\n\nI'm waiting for a [knative](https://github.com/orgs/knative/people) member to verify that this patch is reasonable to test. If it is, they should reply with `/ok-to-test` on its own line. Until that is done, I will not automatically test new commits in this PR, but the usual testing commands by org members will still work. Regular contributors should [join the org](https://github.com/orgs/knative/people) to skip this step.\n\nOnce the patch is verified, the new status will be reflected by the `ok-to-test` label.\n\nI understand the commands that are listed [here](https://go.k8s.io/bot-commands?repo=knative%2Fserving).\n\n
\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n
", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "knative", + "graduated", + "app-definition", + "area-api", + "size-xxl", + "area-networking", + "area-test-and-release", + "cla--yes", + "ok-to-test" + ], + "category": "workloads", + "cncfProjects": [ + "knative" + ], + "targetResourceKinds": [ + "Ingress", + "Namespace" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/knative/serving/pull/4346", + "sourceRepo": "knative/serving", + "reactions": 2, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:43:52.566Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/knative/knative-5186-add-support-for-auto-tls-http01-challenges.json b/solutions/cncf-generated/knative/knative-5186-add-support-for-auto-tls-http01-challenges.json new file mode 100644 index 00000000..6981fc45 --- /dev/null +++ b/solutions/cncf-generated/knative/knative-5186-add-support-for-auto-tls-http01-challenges.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:54.507Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "knative: Add support for auto TLS HTTP01 challenges", + "description": "## Proposed Changes\n\nWhen cert-manager creates a solver service the Knative\ncertificate reconciler will populate `Status.HTTP01Challenges`\nof the corresponding Knative certificate. 
This challenge information\nis used to create an IngressRule to route traffic from Let's Encrypt\nto the solver service.\n\nFixes: #4100\nCo-Authored-By: Mike Petersen \n\n**Release Note**\n\n```release-note\nNONE\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/hold\n\nI'd like to see tracking issues for the outstanding comments.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "knative", + "graduated", + "app-definition", + "area-api", + "size-xl", + "lgtm", + "area-networking", + "approved", + "cla--yes", + "ok-to-test" + ], + "category": "workloads", + "cncfProjects": [ + "knative" + ], + "targetResourceKinds": [ + "Service", + "Ingress" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/knative/serving/pull/5186", + "sourceRepo": "knative/serving", + "reactions": 2, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:43:54.507Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/knative/knative-6505-add-probe-path-for-whitelisting.json b/solutions/cncf-generated/knative/knative-6505-add-probe-path-for-whitelisting.json new file mode 100644 index 00000000..5cdfb93a --- /dev/null +++ b/solutions/cncf-generated/knative/knative-6505-add-probe-path-for-whitelisting.json @@ -0,0 +1,53 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:43:53.589Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "knative: add probe path for whitelisting", + "description": "Fixes #5918 \n\n## Proposed Changes\n\n* Add a probe path thats easy to whitelist\n* e2e test", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/test pull-knative-serving-istio-1.3-mesh\n/test pull-knative-serving-istio-1.3-no-mesh\n/test 
pull-knative-serving-istio-1.4-mesh\n/test pull-knative-serving-istio-1.4-no-mesh\n/test pull-knative-serving-kourier-stable\n/test pull-knative-serving-contour-latest\n/test pull-knative-serving-gloo-0.17.1", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "pkg/reconciler/nscert.Failure" + ] + } + }, + "metadata": { + "tags": [ + "knative", + "graduated", + "app-definition", + "size-l", + "lgtm", + "area-networking", + "area-autoscale", + "approved", + "area-test-and-release", + "cla--yes", + "ok-to-test" + ], + "category": "workloads", + "cncfProjects": [ + "knative" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/knative/serving/pull/6505", + "sourceRepo": "knative/serving", + "reactions": 2, + "comments": 28 + }, + "security": { + "scannedAt": "2026-02-27T17:43:53.589Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/ko/ko-498-feat-sign-checksum-using-cosign.json b/solutions/cncf-generated/ko/ko-498-feat-sign-checksum-using-cosign.json new file mode 100644 index 00000000..a2d2a7fb --- /dev/null +++ b/solutions/cncf-generated/ko/ko-498-feat-sign-checksum-using-cosign.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:53.564Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "ko: feat: sign checksum using cosign", + "description": "Signed-off-by: Furkan \nSigned-off-by: Batuhan Apaydın \n\nFixes #491", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "#### Summary\n\nWith cosign 1.3.0 it is not possible to store the public certificate generated by the OIDC-issuer in a convenient way.\nTo store the public key locally on disk, the user has to intercept the cosign stdout, parse it and store it.\nThis is inconvenient and 
error-prone. We want to make signing and public key distribution as easy as possible, hence this PR introduces a new `pubkey-output` flag for the `sign-blob` sub-command.\n\nThe `pubkey-output` flag respects the `b64` flag and prints the certificate/key as file in a given file path.\n\n#### Ticket Link\n\nFixes None\n\n#### Release Note\n\n```release-note\nThe cosign cli is now able to store the public certificate/key during sign-blob operations via the `pubkey-output` flag.\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "We used `cosign-release: 'v1.3.0'` here, but we can not do verify for now since we have to wait the next cosign release. Here is the related PR: https://github.com/sigstore/cosign/pull/991 by @shibumi. This feature scheduled in the next [v1.4.0](https://github.com/sigstore/cosign/milestone/12?closed=1) release. If we want to export the cert in the release, then probably we have to wait one of these PRs to get merged: https://github.com/sigstore/cosign/pull/1016 and https://github.com/sigstore/cosign/pull/1021 by @developer-guy\nAll (the pull request submitter and all commit authors) CLAs are signed, **but** one or more commits were authored or co-authored by someone other than the pull request submitter.\n\nWe need to confirm that all authors are ok with their commits being contributed to this project. Please have them confirm that by leaving a comment that contains only `@googlebot I consent.` in this pull request.\n\n*Note to project maintainer:* There may be cases where the author cannot leave a comment, or the comment is not properly detected as consent. 
In those cases, you can manually confirm consent of the commit author(s), and set the `cla` label to `yes` (if enabled on your project).\n\nℹ️ **Googlers: [Go here](https://goto.google.com/prinfo/https%3A%2F%2Fgithub.com%2Fgoogle%2Fko%2Fpull%2F498) for more info**.\n\n\n@shibumi cosign v1.4.1 with some bunch of fixes is released today as you might know, would you like to give this issue a hand? 🤩 Or I can do it if you want if you don't have time 🤝\n@developer-guy I can confirm this works: https://github.com/shibumi/kubectl-htpasswd/releases/tag/v0.1.7\r\n\r\nBut you *must* set the experimental flag with cosign v1.4.1\nkindly ping @mattmoor @imjasonh @shibumi, seems everything works fine.\r\n👉 https://github.com/developer-guy/ko/releases/tag/v0.9.3-signchecksum", + "👉 https://github.com/developer-guy/ko/runs/4592518036?check_suite_focus=true\n@Dentrax can you add these two lines to the goreleaser configuration? They ensure that goreleaser will produce a source tarball and signs this tarball as well. 
It provides an easy way to download a signed tarball via curl" + ] + } + }, + "metadata": { + "tags": [ + "ko", + "sandbox", + "app-definition", + "cla--yes" + ], + "category": "workloads", + "cncfProjects": [ + "ko" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/ko-build/ko/pull/498", + "sourceRepo": "ko-build/ko", + "reactions": 2, + "comments": 17 + }, + "security": { + "scannedAt": "2026-02-27T17:48:53.564Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kserve/kserve-1305-update-k8s-libraries-to-0-19-2.json b/solutions/cncf-generated/kserve/kserve-1305-update-k8s-libraries-to-0-19-2.json new file mode 100644 index 00000000..a5e6a258 --- /dev/null +++ b/solutions/cncf-generated/kserve/kserve-1305-update-k8s-libraries-to-0-19-2.json @@ -0,0 +1,53 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:37.903Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kserve: Update k8s libraries to 0.19.2", + "description": "**What this PR does / why we need it**:\nThis PR updates [our k8s dependencies](https://github.com/kubeflow/kfserving/blob/master/go.mod#L49-L74) to 0.19. It will provide compatibility with 0.19 k8s versions and allow projects using kfserving internally to have an easier migration to 0.19. \nOnly a small modification was needed to be able to use 0.19 - [updating the copy of podSpec we have](https://github.com/kubeflow/kfserving/compare/master...ivan-valkov:update-k8s-0.19.2?expand=1#diff-4bc42cc69a5f3db07b4b385408e396486044295aaa11b94fce4e71174f67ecafR245-R251).\n\n**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\nFixes #1293\n\n**Special notes for your reviewer**:\n\nI have tested this change by running `make test`. 
All the formatting changes were made from running `make test`.\n\n**Release note**:\n\n```release-note\nNONE\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@ivan-valkov can you help sign the cla?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Thanks for your pull request. It looks like this may be your first contribution to a Google open source project (if not, look below for help). Before we can look at your pull request, you'll need to sign a Contributor License Agreement (CLA).\n\n:memo: **Please visit to sign.**\n\nOnce you've signed (or fixed any issues), please reply here with `@googlebot I signed it!` and we'll verify it.\n\n----\n\n#### What to do if you already signed the CLA\n\n##### Individual signers\n\n* It's possible we don't have your GitHub username or you're using a different email address on your commit. Check [your existing CLA data](https://cla.developers.google.com/clas) and verify that your [email is set on your git commits](https://help.github.com/articles/setting-your-email-in-git/).\n\n##### Corporate signers\n\n* Your company has a Point of Contact who decides which employees are authorized to participate. Ask your POC to be added to the group of authorized contributors. If you don't know who your Point of Contact is, direct the Google project maintainer to [go/cla#troubleshoot](http://go/cla#troubleshoot) ([Public version](https://opensource.google/docs/cla/#troubleshoot)).\n* The email used to register you as an authorized contributor must be the email used for the Git commit. 
Check [your existing CLA data](https://cla.developers.google.com/clas) and verify that your [email is set on your git commits](https://help.github.com/articles/setting-your-email-in-git/).\n* The email used to register you as an authorized contributor must also be [attached to your GitHub account](https://github.com/settings/emails).\n\t\t\n\nℹ️ **Googlers: [Go here](https://goto.google.com/prinfo/https%3A%2F%2Fgithub.com%2Fkubeflow%2Fkfserving%2Fpull%2F1305) for more info**.\n\n\nHi @ivan-valkov. Thanks for your PR.\n\nI'm waiting for a [kubeflow](https://github.com/orgs/kubeflow/people) member to verify that this patch is reasonable to test. If it is, they should reply with `/ok-to-test` on its own line. Until that is done, I will not automatically test new commits in this PR, but the usual testing commands by org members will still work. Regular contributors should [join the org](https://github.com/kubeflow/kubeflow/blob/master/CONTRIBUTING.md) to skip this step.\n\nOnce the patch is verified, the new status will be reflected by the `ok-to-test` label.\n\nI understand the commands that are listed [here](https://go.k8s.io/bot-commands?repo=kubeflow%2Fkfserving).\n\n
\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n
\n\n> Hey @ivan-valkov , changes look good! Do you foresee any backwards compatibility issues running in older clusters?\r\n\r\nGood question @adriangonz , I do not expect there to be any issues, but let me test some basic workflows on a k8s 1.15 cluster and I will get back to you.\n> > Hey @ivan-valkov , changes look good! Do you foresee any backwards compatibility issues running in older clusters?\r\n> \r\n> Good question @adriangonz , I do not expect there to be any issues, but let me test some basic workflows on a k8s 1.15 cluster and I will get back to you.\r\n\r\nI tested on a kind cluster and creation and deletion work fine. I think there won't be backwards compatibility issues with updating these libs @adriangonz \r\n\n@ivan-valkov can you help sign the cla?\n@googlebot I signed it!\n/ok-to-test\n/test kubeflow-kfserving-presubmit\r\n\r\nTest to see if E2E test works fine without `Unauthorized` issue\nHey, hope you had a good weekend. I resolved a conflict with master and tests seem to be passing. Is there anything else we need to get this one merged @yuzisun @PatrickXYS \n/test kubeflow-kfserving-presubmit", + "Ref: http://86308603-argo-argo-5ce9-1162466691.us-west-2.elb.amazonaws.com/workflows/kubeflow-test-infra/kubeflow-kfserving-presubmit-e2e-1305-7b34a42-0256-a794?tab=workflow&nodeId=kubeflow-kfserving-presubmit-e2e-1305-7b34a42-0256-a794-4239156108&sidePanel=logs%3Akubeflow-kfserving-presubmit-e2e-1305-7b34a42-0256-a794-4239156108%3Amain\n/test kubeflow-kfserving-presubmit\n/test kubeflow-kfserving-presubmit\r\n\r\n\r\nTesting to see if the 401 occurs again.\n/retest\n/retest\n/test ?\n@PatrickXYS: The following commands are available to trigger jobs:\n* `/test kubeflow-kfserving-presubmit`\n\nUse `/test all` to run all jobs.\n\n\n
\n\nIn response to [this](https://github.com/kubeflow/kfserving/pull/1305#issuecomment-770499160):\n\n>/test ?\n\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n
\n/test kubeflow-kfserving-presubmit", + "Please rebase master branch to make sure it updates test code\n/retest\nStill" + ] + } + }, + "metadata": { + "tags": [ + "kserve", + "incubating", + "app-definition", + "lgtm", + "approved", + "size-xl", + "ok-to-test" + ], + "category": "workloads", + "cncfProjects": [ + "kserve" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/kserve/kserve/pull/1305", + "sourceRepo": "kserve/kserve", + "reactions": 2, + "comments": 27 + }, + "security": { + "scannedAt": "2026-02-27T17:44:37.904Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kserve/kserve-1334-support-knative-0-19.json b/solutions/cncf-generated/kserve/kserve-1334-support-knative-0-19.json new file mode 100644 index 00000000..d3d0e9b3 --- /dev/null +++ b/solutions/cncf-generated/kserve/kserve-1334-support-knative-0-19.json @@ -0,0 +1,54 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:36.723Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kserve: support knative 0.19+", + "description": "**What this PR does / why we need it**:\nAdds support for knative 0.19+ (and 0.20).\n\n**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\nFixes #1333\n\n**Special notes for your reviewer**:\nWork in progress, we should replace the `KnativeLocalGateway` constant with an option from the inferenceservice ConfigMap so users of old knative can still use kfserving 0.5.\n\n1. 
Please confirm that if this PR changes any image versions, then that's the sole change this PR makes.\n\n**Release note**:\n\n```release-note\nSupport Knative 0.20 and Istio 1.7.1\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/cc @yuzisun", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "[APPROVALNOTIFIER] This PR is **NOT APPROVED**\n\nThis pull-request has been approved by: *theofpa*\nTo complete the [pull request process](https://git.k8s.io/community/contributors/guide/owners.md#the-code-review-process), please assign **cliveseldon** after the PR has been reviewed.\nYou can assign the PR to them by writing `/assign @cliveseldon` in a comment when ready.\n\nThe full list of commands accepted by this bot can be found [here](https://go.k8s.io/bot-commands?repo=kubeflow%2Fkfserving).\n\n
\nNeeds approval from an approver in each of these files:\n\n- **[OWNERS](https://github.com/kubeflow/kfserving/blob/master/OWNERS)**\n\nApprovers can indicate their approval by writing `/approve` in a comment\nApprovers can cancel approval by writing `/approve cancel` in a comment\n
\n\n/cc @yuzisun \nThanks @theofpa ! \r\n\r\nLet's also bump up the knative version [in e2e test](https://github.com/kubeflow/kfserving/blob/master/test/scripts/run-e2e-tests.sh#L27) ?\n/ok-to-test\n@theofpa Are you going to change to read from the configuration?\nlink to #1363 about making cluster-local-gateway configurable\n/rest kubeflow-kfserving-presubmit\n@theofpa seems an error during install", + "/retest\n> @theofpa seems an error during install\r\n> \r\n>", + "yes, it seems that the operator takes some time before it starts creating the resources and the kubectl wait fails to find any pods in the new namespace.\n/retest\n/retest\n/retest\n@theofpa seems it always fails on the logger test, is it possible that the gateway change breaks the logger?\n> @theofpa seems it always fails on the logger test, is it possible that the gateway change breaks the logger?\r\n\r\nThere was a json syntax error in the configmap and the inferenceservice was not becoming ready, let's give it one more try!\n/retest\n> > @theofpa seems it always fails on the logger test, is it possible that the gateway change breaks the logger?\r\n> \r\n> There was a json syntax error in the configmap and the inferenceservice was not becoming ready, let's give it one more try!\r\n\r\nstill the logger test :(\ntriggering the test again to check the logs on the ephemeral cluster\r\n/retest\nSo, it failed again but now I have some information from the ephemeral cluster:\r\n\r\nBoth `message-dumper` and `idvc-logger` isvcs start:" + ] + } + }, + "metadata": { + "tags": [ + "kserve", + "incubating", + "app-definition", + "lgtm", + "approved", + "size-l", + "ok-to-test" + ], + "category": "workloads", + "cncfProjects": [ + "kserve" + ], + "targetResourceKinds": [ + "Service", + "Configmap" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kserve/kserve/pull/1334", + "sourceRepo": "kserve/kserve", + "reactions": 2, + "comments": 29 + }, + "security": { + "scannedAt": 
"2026-02-27T17:44:36.723Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kserve/kserve-1780-fix-aws-region-and-add-aws-ca-bundle-to-s3-secrets-go.json b/solutions/cncf-generated/kserve/kserve-1780-fix-aws-region-and-add-aws-ca-bundle-to-s3-secrets-go.json new file mode 100644 index 00000000..b0426867 --- /dev/null +++ b/solutions/cncf-generated/kserve/kserve-1780-fix-aws-region-and-add-aws-ca-bundle-to-s3-secrets-go.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:41.819Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kserve: Fix AWS_REGION and add AWS_CA_BUNDLE to s3_secrets.go", + "description": "The boto3 package seems to look for AWS_DEFAULT_REGION, meanwhile\nkfserving uses AWS_REGION, that doesn't seem to be working\n(tested following\nhttps://github.com/kubeflow/kfserving/blob/master/docs/samples/storage/s3/README.md).\n\nThis change doesn't replace all occurrences of AWS_REGION, but only\nthe ones that set the environment variables.\n\nGH-1765\n\nAdd also the possibility to force boto to use a custom CA Bundle path when validating the TLS certificate of the S3 endpoint. Our use case is related to using an internal S3 endpoint (based on Openstack Swift) offering a TLS certificate signed by our internal CA.\n\nGH-1766\n\n**What this PR does / why we need it**:\n\n**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\nFixes #1765 #1766\n\n**Special notes for your reviewer**:\n\n1. Please confirm that if this PR changes any image versions, then that's the sole change this PR makes.\n\n**Release no", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "> Hi everybody, any feedback?\n\nThanks @elukey ! 
The change mostly lgtm, Can you help link the AWS S3 doc regarding the region variable so we can have a reference?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Hi @elukey. Thanks for your PR.\n\nI'm waiting for a [kubeflow](https://github.com/orgs/kubeflow/people) member to verify that this patch is reasonable to test. If it is, they should reply with `/ok-to-test` on its own line. Until that is done, I will not automatically test new commits in this PR, but the usual testing commands by org members will still work. Regular contributors should [join the org](https://github.com/orgs/kubeflow/people) to skip this step.\n\nOnce the patch is verified, the new status will be reflected by the `ok-to-test` label.\n\nI understand the commands that are listed [here](https://go.k8s.io/bot-commands?repo=kubeflow%2Fkfserving).\n\n
\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n
\n\nHi! I am reasonably sure that the env variables are working as expected, we use a custom image for the storage-initializer that is basically a copy of the Dockerfile that you provide with little differences (like a debian base image etc..). I had to add ENV statement for AWS_DEFAULT_REGION and AWS_CA_BUNDLE to be able to make it work with my internal swift endpoint (as described above). I haven't fully tested the go code since replicating the setup is not easy, but the change is very small and should work as expected. If you want me to follow up with more testing please let me know :)\nHi everybody, any feedback?\n> Hi everybody, any feedback?\r\n\r\nThanks @elukey ! The change mostly lgtm, Can you help link the AWS S3 doc regarding the region variable so we can have a reference? \n> > Hi everybody, any feedback?\r\n> \r\n> Thanks @elukey ! The change mostly lgtm, Can you help link the AWS S3 doc regarding the region variable so we can have a reference?\r\n\r\nSure! Do you want only a reference in here or do you prefer also a reference in a code's comment? \r\n\r\nAWS Cli: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html\r\nBoto: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#using-environment-variables\n/ok-to-test\n> > > Hi everybody, any feedback?\r\n> > \r\n> > \r\n> > Thanks @elukey ! The change mostly lgtm, Can you help link the AWS S3 doc regarding the region variable so we can have a reference?\r\n> \r\n> Sure! Do you want only a reference in here or do you prefer also a reference in a code's comment?\r\n> \r\n> AWS Cli: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html\r\n> Boto: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#using-environment-variables\r\n\r\ncode comments will be helpful, thanks!\n/retest\nThe test to upload something to AWS s3 fails for some reason, will dig a bit more into it. 
Tried to modify the tests but didn't succeed. Any suggestion is welcome :)\nI see that in https://github.com/kubeflow/testing/search?q=AWS_REGION the variable is mentioned a lot, so this may explain why I am struggling with tests. But boto doesn't support it, more info: https://github.com/boto/boto3/issues/2574\nThe error that I see is:" + ] + } + }, + "metadata": { + "tags": [ + "kserve", + "incubating", + "app-definition", + "lgtm", + "approved", + "size-s", + "ok-to-test" + ], + "category": "workloads", + "cncfProjects": [ + "kserve" + ], + "targetResourceKinds": [ + "Secret" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kserve/kserve/pull/1780", + "sourceRepo": "kserve/kserve", + "reactions": 1, + "comments": 28 + }, + "security": { + "scannedAt": "2026-02-27T17:44:41.819Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kserve/kserve-1883-storage-py-add-more-logging-and-an-error-condition-for-s3.json b/solutions/cncf-generated/kserve/kserve-1883-storage-py-add-more-logging-and-an-error-condition-for-s3.json new file mode 100644 index 00000000..e284e12f --- /dev/null +++ b/solutions/cncf-generated/kserve/kserve-1883-storage-py-add-more-logging-and-an-error-condition-for-s3.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:38.973Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kserve: storage.py: add more logging and an error condition for s3", + "description": "The storage initializer for s3 ends up into the following\ntwo use cases:\n- If no object is present in the bucket, it doesn't raise\n any error, and later on the kserve-container fails since\n no model binary is present.\n- If a full object path is specified (like /a/b/../model.bin)\n then the s3 downloader will create the same path under\n /mnt/models, and if the kserve-container is not smart enough\n it 
will fail to find the model.bin.\nThe current storage-initializer logging doesn't give any hint\nor errors about the above use case, ending up in difficult-to-debug\nscenarios (see the related issue for more info).\n\nFixes: #1882\n\n**What this PR does / why we need it**:\n\n**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\nFixes #\n\n**Special notes for your reviewer**:\n\n1. Please confirm that if this PR changes any image versions, then that's the sole change this PR makes.\n\n**Release note*", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "> * If a full object path is specified (like /a/b/../model.bin)\n> then the s3 downloader will create the same path under\n> /mnt/models, and if the kserve-container is not smart enough\n> it will fail to find the model.bin.\n\nThis seems wrong we should fix that, the [code snippet](https://github.com/kserve/kserve/blob/master/python/kserve/kserve/storage.py#L118) here is problematic.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kserve", + "incubating", + "app-definition", + "lgtm", + "approved", + "ok-to-test" + ], + "category": "workloads", + "cncfProjects": [ + "kserve" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kserve/kserve/pull/1883", + "sourceRepo": "kserve/kserve", + "reactions": 2, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:44:38.973Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kserve/kserve-4851-feat-bump-up-vllm-version-to-0-11-2-remove-python-3-9-support.json b/solutions/cncf-generated/kserve/kserve-4851-feat-bump-up-vllm-version-to-0-11-2-remove-python-3-9-support.json new file mode 100644 index 00000000..6e935306 --- /dev/null +++ 
b/solutions/cncf-generated/kserve/kserve-4851-feat-bump-up-vllm-version-to-0-11-2-remove-python-3-9-support.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:40.771Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kserve: feat: bump up vllm version to 0.11.2 & remove python 3.9 support", + "description": "**What this PR does / why we need it**:\n\n**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\nFixes #4850\n\n**Type of changes**\nPlease delete options that are not relevant.\n\n- [ ] Bug fix (non-breaking change which fixes an issue)\n- [x] New feature (non-breaking change which adds functionality)\n- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)\n- [ ] This change requires a documentation update\n\n**Feature/Issue validation/testing**:\n\nPlease describe the tests that you ran to verify your changes and relevant result summary. Provide instructions so it can be reproduced.\nPlease also list any relevant details for your test configuration.\n\n- [ ] Test A\n- [ ] Test B\n\n- Logs\n\n**Special notes for your reviewer**:\n\n1. 
Please confirm that if this PR changes any image versions, then that's the sole change this PR makes.\n\n**Checklist**:\n\n- [x] Ha", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "> [!NOTE] \n> Due to rebase issues, I created a new one and will continue the work here: https://github.com/kserve/kserve/pull/4851.\n\n**What this PR does / why we need it**:\n\n**What this PR does / why we need it**:\nThis PR upgrades the `huggingfaceserver` dependency on vLLM to **v0.10.1,1** and adapts our integration to be compatible with changes introduced in the 0.10.x \n\nKey updates:\n- Bump vLLM to **0.10.1.1** in `huggingfaceserver` dependencies.\n- Align engine initialization and decoding configuration with 0.10.x (e.g., argument normalization and defaults).\n- Maintain compatibility with prior request/response handling and streaming semantics.\n\n**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\nFixes #\n\n**Type of changes**\nPlease delete options that are not relevant.\n\n- [ ] Bug fix (non-breaking change which fixes an issue)\n- [x] New feature (non-breaking change which adds functionality)\n- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)\n- [ ] This change requires a documentation update\n\n**Feature/Issue validation/testing**:\n\nPlease describe the tests that you ran to verify your changes and relevant result summary. Provide instructions so it can be reproduced.\nPlease also list any relevant details for your test configuration.\n\n- [ ] Test A\n- [ ] Test B\n\n- Logs\n\n**Special notes for your reviewer**:\n\n1. Please confirm t", + "steps": [ + "Please confirm t" + ], + "codeSnippets": [ + "**Re-running failed tests**\r\n\r\n- `/rerun-all` - rerun all failed workflows.\r\n- `/rerun-workflow ` - rerun a specific failed workflow. Only one workflow name can be specified. 
Multiple /rerun-workflow commands are allowed per comment.\n> [!NOTE] \r\n> Due to rebase issues, I created a new one and will continue the work here: https://github.com/kserve/kserve/pull/4851.\r\n\r\n\r\n\r\n**What this PR does / why we need it**:\r\n\r\n**What this PR does / why we need it**:\r\nThis PR upgrades the `huggingfaceserver` dependency on vLLM to **v0.10.1,1** and adapts our integration to be compatible with changes introduced in the 0.10.x \r\n\r\nKey updates:\r\n- Bump vLLM to **0.10.1.1** in `huggingfaceserver` dependencies.\r\n- Align engine initialization and decoding configuration with 0.10.x (e.g., argument normalization and defaults).\r\n- Maintain compatibility with prior request/response handling and streaming semantics.\r\n\r\n**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\r\nFixes #\r\n\r\n**Type of changes**\r\nPlease delete options that are not relevant.\r\n\r\n- [ ] Bug fix (non-breaking change which fixes an issue)\r\n- [x] New feature (non-breaking change which adds functionality)\r\n- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)\r\n- [ ] This change requires a documentation update\r\n\r\n**Feature/Issue validation/testing**:\r\n\r\nPlease describe the tests that you ran to verify your changes and relevant result summary. Provide instructions so it can be reproduced.\r\nPlease also list any relevant details for your test configuration.\r\n\r\n- [ ] Test A\r\n- [ ] Test B\r\n\r\n- Logs\r\n\r\n**Special notes for your reviewer**:\r\n\r\n1. 
Please confirm that if this PR changes any image versions, then that's the sole change this PR makes.\r\n\r\n**Checklist**:\r\n\r\n- [ ] Have you added unit/e2e tests that prove your fix is effective or that this feature works?\r\n- [x] Has code been commented, particularly in hard-to-understand areas?\r\n- [ ] Have you made corresponding changes to the documentation?\r\n\r\n**Release note**:\r\n", + "**Re-running failed tests**\r\n\r\n- `/rerun-all` - rerun all failed workflows.\r\n- `/rerun-workflow ` - rerun a specific failed workflow. Only one workflow name can be specified. Multiple /rerun-workflow commands are allowed per comment.\n@sivanantha321 Could you please review this PR and trigger the CI tests when you have time? The build and tests are passing locally on my side.\r\n\n> @sivanantha321 Could you please review this PR and trigger the CI tests when you have time? The build and tests are passing locally on my side.\r\n\r\ntriggered.\n@csy1204 Can you run `make precommit` and commit the changes ?\n@sivanantha321 All set — ran make precommit and fixed the test failures. Thanks!\n/rerun-all\r\n\r\n
\r\npython-test.yml huggingfaceserver-cpu test result (100%)" + ] + } + }, + "metadata": { + "tags": [ + "kserve", + "incubating", + "app-definition", + "lgtm", + "approved" + ], + "category": "workloads", + "cncfProjects": [ + "kserve" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kserve/kserve/pull/4851", + "sourceRepo": "kserve/kserve", + "reactions": 2, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:44:40.771Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kuadrant/kuadrant-1164-add-observability-feature-flag-to-kuadrant-crd.json b/solutions/cncf-generated/kuadrant/kuadrant-1164-add-observability-feature-flag-to-kuadrant-crd.json new file mode 100644 index 00000000..5883251d --- /dev/null +++ b/solutions/cncf-generated/kuadrant/kuadrant-1164-add-observability-feature-flag-to-kuadrant-crd.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:00.630Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kuadrant: Add observability feature flag to Kuadrant CRD", + "description": "Closes #1167\nSupercedes #1143 (not using a fork to allow all tests to have access to secrets)\nAlso adds a new user guide as part of #1158 (separate PR to add it to docs site will follow)\n\nA new field is introduced to the Kuadrant CRD `spec.observability.enable`.\nIf `true`, various ServiceMonitors and PodMonitors will be created:\n\n- Kuadrant operator servicemonitor\n- Authorino operator servicemonitor\n- Limitador operator servicemonitor\n- DNS operator servicemonitor\n\nAlso, for each Gateway in the toplogy, the following monitors are created (depending on the Gateway provider):\n\n- Istio Gateways\n - Istiod ServiceMonitor\n - Istio gw pod (envoy) stats PodMonitor\n- Envoy Gateway Gateways\n - Envoy Gateway ServiceMonitor\n - Envoy Proxy 
Stats PodMonitor \n\nTo accomplish this, a new reconciler is added `observability_reconciler.go`\nAll monitors are added to the topology.\nThe 4 core component monitors are linked to the root Kuadrant CR in the topology.\nThe other monitors are not linked, ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Closes #1065\n\nTODO:\n- [x] Verify pattern for setting up the reconcile function and monitoring client\n- [x] Create Gateway PodMonitor (istio & envoy)\n- [x] Refactor duplicate creation blocks\n- [x] Add controller test", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "I have run some verifications steps successfully and works like a charm\r\n\r\n### Verification steps\r\n\r\n* Setup environment based on EnvoyGateway", + "* Apply the Kuadrant custom resource with observability enabled", + "* Wait for kuadrant instance to be ready" + ] + } + }, + "metadata": { + "tags": [ + "kuadrant", + "sandbox", + "app-definition", + "kind-enhancement" + ], + "category": "workloads", + "cncfProjects": [ + "kuadrant" + ], + "targetResourceKinds": [ + "Pod", + "Service", + "Secret" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/Kuadrant/kuadrant-operator/pull/1164", + "sourceRepo": "kuadrant/kuadrant-operator", + "reactions": 1, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:49:00.630Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kube-ovn/kube-ovn-4437-feat-helm-new-chart-design.json b/solutions/cncf-generated/kube-ovn/kube-ovn-4437-feat-helm-new-chart-design.json new file mode 100644 index 00000000..6b515043 --- /dev/null +++ b/solutions/cncf-generated/kube-ovn/kube-ovn-4437-feat-helm-new-chart-design.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:14.398Z", + "exportedBy": "cncf-mission-generator", + 
"consoleVersion": "auto-generated", + "mission": { + "title": "kube-ovn: feat(helm): new chart design", + "description": "This PR is about issue [#4330](https://github.com/kubeovn/kube-ovn/issues/4330) \n\nThis is a draft, it shouldn't change any resource name as to not break existing installations, but if someone wants to update using the Helm Chart, they'll obviously have to adapt their values.\n\nFixes #4330", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "Note that I have no clue what the difference is between DPDK and DPDK-hybrid, the DPDK docs only mention the hybrid version as far as I know", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kube-ovn", + "sandbox", + "app-definition", + "refactor", + "chart", + "size-xxl", + "lgtm" + ], + "category": "workloads", + "cncfProjects": [ + "kube-ovn" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubeovn/kube-ovn/pull/4437", + "sourceRepo": "kubeovn/kube-ovn", + "reactions": 1, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:48:14.399Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kube-rs/kube-rs-1652-migrate-from-backoff-to-backon.json b/solutions/cncf-generated/kube-rs/kube-rs-1652-migrate-from-backoff-to-backon.json new file mode 100644 index 00000000..85a5a086 --- /dev/null +++ b/solutions/cncf-generated/kube-rs/kube-rs-1652-migrate-from-backoff-to-backon.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:18.439Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kube-rs: Migrate from `backoff` to `backon`", + "description": "## Motivation\n\n[backoff](https://github.com/ihrwein/backoff) has been unmaintained for a while now, which is 
starting to cause rustsec warnings (#1635). [backon](https://github.com/Xuanwo/backon) is a maintained alternative.\n\nFixes #1635.\n\n## Solution\n\nThis PR migrates all uses of backoff to backon. This is a pretty breaking change for anyone who interacts with our backoff system beyond letting `Controller` use its default.\n\n`backon::Backoff` doesn't have an equivalent to `backoff::Backoff::reset`, so this PR adds a new `ResettableBackoff` wrapper system for those use cases.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## Motivation\n\nThe `backoff` crate is no longer maintained and it's pulling the `instant` which is also marked as unmaintained by RUSTSEC.\n\nThis PR fixes https://github.com/kube-rs/kube/issues/1635\n\n## Solution\n\nReplace the `backoff` dependency with `backon`. The former one is no longer maintained and is also pulling the `instant` crate, which has been marked as unmaintained by RUSTSEC.\n\nPrior to this commit the public API of kube-rs exposed a trait defined by the `backoff` crate. This commits introduces a new trait defined by kube-rs, which wraps the `backon` trait.\n\nI also had to introduce this new trait because the `backon::Backoff` trait doesn't have a `reset` method. kubers makes use of this method, hence the \"trick\" of defining our own trait solves this challenge. 
If you don't like this approach, I can try to go upstream to `backon` and propose the extension of their trait to include the `reset` method.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "/home/n/Documents/kube-rs/target/debug/deps/kube_derive-81b5765bf8276125: error while loading shared libraries: libstd-ca74a2d9c5166d9f.so: cannot open shared object file: No such file or directory" + ] + } + }, + "metadata": { + "tags": [ + "kube-rs", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "kube-rs" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kube-rs/kube/pull/1652", + "sourceRepo": "kube-rs/kube", + "reactions": 2, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:48:18.439Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kube-rs/kube-rs-348-generate-schema-for-crd-v1.json b/solutions/cncf-generated/kube-rs/kube-rs-348-generate-schema-for-crd-v1.json new file mode 100644 index 00000000..f1fc4556 --- /dev/null +++ b/solutions/cncf-generated/kube-rs/kube-rs-348-generate-schema-for-crd-v1.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:17.023Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kube-rs: Generate schema for CRD v1", + "description": "Schema is generated with `schemars`.\n\nAlso fixed the location of `subresources` and `additionalPrinterColumns`.\n\nCloses https://github.com/clux/kube-rs/issues/264\n\n---\n\nFor deriving CRD v1, the spec struct must include `schemars::JsonSchema`. 
~~The schemas for subresources are currently not generated, but it should be possible to add the status subresource by requiring the status struct to have `schemars::JsonSchema` and extracting the generated schema similar to the spec.~~ If there is a status subresource, its struct must include `JsonSchema` as well.\n\nI'm not sure how to handle the new `schemars` dependency.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Added CRD v1 schema generation support: https://github.com/clux/kube-rs/pull/348", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "allOf:\r\n- properties:\r\n foo:\r\n ...", + "properties:\r\n foo:\r\n ...\r\nallOf:\r\n- properties:\r\n foo:\r\n ...", + "The generated schema currently looks like the following:" + ] + } + }, + "metadata": { + "tags": [ + "kube-rs", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "kube-rs" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kube-rs/kube/pull/348", + "sourceRepo": "kube-rs/kube", + "reactions": 5, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:48:17.023Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubearmor/kubearmor-1253-feat-controller-move-deployments-to-kubearmorcontroller.json b/solutions/cncf-generated/kubearmor/kubearmor-1253-feat-controller-move-deployments-to-kubearmorcontroller.json new file mode 100644 index 00000000..beca23d4 --- /dev/null +++ b/solutions/cncf-generated/kubearmor/kubearmor-1253-feat-controller-move-deployments-to-kubearmorcontroller.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:24.047Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubearmor: feat(controller): Move 
deployments to KubeArmorController", + "description": "**Purpose of PR?**:\nFixes #1249 \n\n**Does this PR introduce a breaking change?**\nNot a breaking change but since deployments are being updated, updates would've to be carried accordingly.\n\n**If the changes in this PR are manually verified, list down the scenarios covered:**:\n* helm installation\n\n**Additional information for reviewer?** :\n* Deprecate KubearmorPolicy, KubearmorHostPolicy, KubearmorAnnotation controllers and use the single KubeArmorController instead.\n* Update helm charts\n\n**Checklist:**\n- [ ] Bug fix. Fixes #\n- [x] New feature (non-breaking change which adds functionality)\n- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)\n- [ ] This change requires a documentation update\n- [x] PR Title follows the convention of `(): `\n- [ ] Commit has unit tests\n- [ ] Commit has integration tests", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "NOTE: The [ci-test-ginkgo workflows](https://github.com/kubearmor/KubeArmor/blob/main/.github/workflows/ci-test-ginkgo.yml) are expected to fail rn as we don't have a kubearmor-controller image with the latest tag published to dockerhub yet. 
Use helm chart tests which build the image first for validating this PR", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "helm repo add kubearmor https://delusionaloptimist.github.io/charts/\r\nhelm upgrade --install kubearmor kubearmor/kubearmor --set kubearmorrelay.enabled=true -n kube-system" + ] + } + }, + "metadata": { + "tags": [ + "kubearmor", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "kubearmor" + ], + "targetResourceKinds": [ + "Deployment" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubearmor/KubeArmor/pull/1253", + "sourceRepo": "kubearmor/kubearmor", + "reactions": 1, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:48:24.047Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubearmor/kubearmor-581-refactor-k8s-handlers-to-make-requests-to-kube-apiserver.json b/solutions/cncf-generated/kubearmor/kubearmor-581-refactor-k8s-handlers-to-make-requests-to-kube-apiserver.json new file mode 100644 index 00000000..1314356c --- /dev/null +++ b/solutions/cncf-generated/kubearmor/kubearmor-581-refactor-k8s-handlers-to-make-requests-to-kube-apiserver.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:22.881Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubearmor: Refactor k8s-handlers to make requests to kube-apiserver", + "description": "Closes #531", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@afzalbin64 sorry for the delayed response! What's the current status of the PR? 
I wanted to know what should I be looking for and how can help out :) Thanks", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kubearmor", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "kubearmor" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubearmor/KubeArmor/pull/581", + "sourceRepo": "kubearmor/kubearmor", + "reactions": 4, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:48:22.881Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubeclipper/kubeclipper-178-update-cron-schedule-parse-and-unit-test.json b/solutions/cncf-generated/kubeclipper/kubeclipper-178-update-cron-schedule-parse-and-unit-test.json new file mode 100644 index 00000000..146bca69 --- /dev/null +++ b/solutions/cncf-generated/kubeclipper/kubeclipper-178-update-cron-schedule-parse-and-unit-test.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:26.325Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubeclipper: update cron schedule parse and unit test", + "description": "Signed-off-by: Liucw \n\n### What type of PR is this?\n/kind feature\n\n### What this PR does / why we need it:\nupdate the way to parse month-end cron schedule\n\n### Which issue(s) this PR fixes:\n\nFixes https://github.com/kubeclipper-labs/kubeclipper/issues/177\n\n### Special notes for reviewers:\n```\n```\n\n### Does this PR introduced a user-facing change?\n\n```release-note\nNone\n```\n\n### Additional documentation, usage docs, etc.:\n\n```docs\n\n```\n@x893675", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "LGTM label has been added.
Git tree hash: 20a6ddf716894e0bbe018c7eebd555403794d46f
", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "### Additional documentation, usage docs, etc.:\r\n", + "@x893675 \n\n# [Codecov](https://codecov.io/gh/kubeclipper-labs/kubeclipper/pull/178?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=kubeclipper-labs) Report\n> Merging [#178](https://codecov.io/gh/kubeclipper-labs/kubeclipper/pull/178?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=kubeclipper-labs) (4164e57) into [master](https://codecov.io/gh/kubeclipper-labs/kubeclipper/commit/79f3c5c4b1adcc854674dc92badd8586a4669769?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=kubeclipper-labs) (79f3c5c) will **increase** coverage by `0.41%`.\n> The diff coverage is `37.85%`.\n\n[![Impacted file tree graph](https://codecov.io/gh/kubeclipper-labs/kubeclipper/pull/178/graphs/tree.svg?width=650&height=150&src=pr&token=3PWFG2MHTE&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=kubeclipper-labs)](https://codecov.io/gh/kubeclipper-labs/kubeclipper/pull/178?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=kubeclipper-labs)" + ] + } + }, + "metadata": { + "tags": [ + "kubeclipper", + "sandbox", + "app-definition", + "kind-feature", + "approved", + "size-l", + "lgtm", + "release-note-none", + "dco-signoff--yes" + ], + "category": "workloads", + "cncfProjects": [ + "kubeclipper" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubeclipper/kubeclipper/pull/178", + "sourceRepo": "kubeclipper/kubeclipper", + "reactions": 0, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:48:26.325Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + 
} +} diff --git a/solutions/cncf-generated/kubeedge/kubeedge-1982-update-vendor-to-kubernetes-1-18.json b/solutions/cncf-generated/kubeedge/kubeedge-1982-update-vendor-to-kubernetes-1-18.json new file mode 100644 index 00000000..e5d98bf4 --- /dev/null +++ b/solutions/cncf-generated/kubeedge/kubeedge-1982-update-vendor-to-kubernetes-1-18.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:09.622Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubeedge: Update vendor to Kubernetes 1.18", + "description": "**What type of PR is this?**\n> Uncomment only one ` /kind <>` line, hit enter to put that in a new line, and remove leading whitespaces from that line:\n>\n> /kind api-change\n> /kind bug\n> /kind cleanup\n> /kind design\n> /kind documentation\n> /kind test\n> /kind failing-test\n/kind feature\n\n**What this PR does / why we need it**:\nUpdate vendor to Kubernetes 1.18\n\n**Which issue(s) this PR fixes**:\n\nFixes #1933 \n\n**Special notes for your reviewer**:\n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\n\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Thanks for the PR, looks like the k8s verion is 1.18.0 and not 1.18.5?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kubeedge", + "graduated", + "app-definition", + "kind-feature", + "lgtm", + "approved", + "size-xxl" + ], + "category": "workloads", + "cncfProjects": [ + "kubeedge" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubeedge/kubeedge/pull/1982", + "sourceRepo": "kubeedge/kubeedge", + "reactions": 1, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:44:09.622Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git 
a/solutions/cncf-generated/kubeedge/kubeedge-2223-update-kubernetes-version-to-1-19-3.json b/solutions/cncf-generated/kubeedge/kubeedge-2223-update-kubernetes-version-to-1-19-3.json new file mode 100644 index 00000000..9f7031c7 --- /dev/null +++ b/solutions/cncf-generated/kubeedge/kubeedge-2223-update-kubernetes-version-to-1-19-3.json @@ -0,0 +1,53 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:07.887Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubeedge: update Kubernetes version to 1.19.3", + "description": "**What type of PR is this?**\n\n**What this PR does / why we need it**:\n\nupdate Kubernetes version to 1.19.3\n\n**Which issue(s) this PR fixes**:\n\nFixes #2146\n\n**Special notes for your reviewer**:\n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\n\nWith updated to Kubernetes 1.19.3, here are changes:\n\nFile size change:\n cloudcore: 36458496 <- release 1.4: 46442976\n edgecore: 78478800 \t<- release 1.4: 101401880\n keadm: 36704256 \t<- release 1.4: 39941925\n\nMem Usaage change: \n cloudcore: Same\n edgecore: Same\n\n \t\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "**What type of PR is this?**\n\n**What this PR does / why we need it**:\nUpdate K8s version to x.19.3\n**Which issue(s) this PR fixes**:\n\nFixes #\n\n**Special notes for your reviewer**:\n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\n\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "\r\n\r\n**What type of PR is this?**\r\n\r\n\r\n\r\n\r\n**What this PR does / why we need it**:\r\nUpdate K8s version to x.19.3\r\n**Which issue(s) this PR fixes**:\r\n\r\nFixes #\r\n\r\n**Special notes for your reviewer**:\r\n\r\n**Does this PR introduce a user-facing change?**:\r\n", + "@daixiang0 WIP: 1.19.3 #2276 is for testing. 
Rio will close it. Please review this PR. Thanks.\n/assign @daixiang0 \n@dingyin get, while now CI resource is tight, better to post pr in forked repo for test :)\r\n\r\n\n> @dingyin get, while now CI resource is tight, better to post pr in forked repo for test :)\r\nThanks. We don't do it usually. But the test is passing locally, but it fails on TravisCI. So Rio had to tested using a PR and we figured out this is TravisCI Env setup issue. \r\n\r\n\nI know it, if you post a pr in your repo, it would schudle quickly and do not cost KubeEdge test resource.\nAlso, squash commits, they are too many.\n> Also, squash commits, they are too many.\r\n\r\nSure, just squashed some commits together. Now there are much less commits.\nThanks for the great work, and could you please add info to PR description including binary and mem size change after upgrade to 1.19.3?\nPlease fix the comments ASAP, we will cut release branch shortly :)\n> Thanks for the great work, and could you please add info to PR description including binary and mem size change after upgrade to 1.19.3?\r\n\r\nupdated PR description.\nThe compatibility of the Docker version is a blocking issue here, we should follow the K8s upstream, so the reason needs to be analyzed and it is best to solve it.\n> The compatibility of the Docker version is a blocking issue here, we should follow the K8s upstream, so the reason needs to be analyzed and it is best to solve it.", + "Just checked in my host with `docker 18.06`, and it worked fine. So what's the problem here? 
@dingyin @zzxgzgz\n> > The compatibility of the Docker version is a blocking issue here, we should follow the K8s upstream, so the reason needs to be analyzed and it is best to solve it.\r\n> \r\n>" + ] + } + }, + "metadata": { + "tags": [ + "kubeedge", + "graduated", + "app-definition", + "kind-api-change", + "kind-cleanup", + "kind-feature", + "lgtm", + "approved", + "size-xxl" + ], + "category": "workloads", + "cncfProjects": [ + "kubeedge" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/kubeedge/kubeedge/pull/2223", + "sourceRepo": "kubeedge/kubeedge", + "reactions": 1, + "comments": 20 + }, + "security": { + "scannedAt": "2026-02-27T17:44:07.887Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubeedge/kubeedge-2230-edged-inject-environment-var.json b/solutions/cncf-generated/kubeedge/kubeedge-2230-edged-inject-environment-var.json new file mode 100644 index 00000000..69eb0010 --- /dev/null +++ b/solutions/cncf-generated/kubeedge/kubeedge-2230-edged-inject-environment-var.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:05.566Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubeedge: Edged: inject environment var", + "description": "Support inject environment var into container, including services, configMaps and secrets.\n\nSigned-off-by: Xiang Dai \n\n**What type of PR is this?**\n\n /kind bug\n\n**What this PR does / why we need it**:\n\nEnable environment var inject at edge side.\n\n**Which issue(s) this PR fixes**:\n\nFixes #2229\n\n**Special notes for your reviewer**:\n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\nSupport environment var inject, including configmaps, services and secrets.\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Test way:\n\n- cd 
/path/tp/kubeedge_repo\n- apply this pr\n- `./hack/local-up-kubeedge.sh`\n- `git clone https://github.com/kubernetes/kubernetes $GOPATH/src/k8s.io`\n- `cd $GOPATH/src/k8s.io/kubernetes`\n- `git checkout v1.18.6`\n- `make`\n- `./_output/local/go/bin/e2e.test --provider=local --kubeconfig=/root/.kube/config --ginkgo.focus=\" .* environment .*Conformance\"`", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kubeedge", + "graduated", + "app-definition", + "size-xl", + "needs-rebase" + ], + "category": "workloads", + "cncfProjects": [ + "kubeedge" + ], + "targetResourceKinds": [ + "Service", + "Configmap", + "Secret" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubeedge/kubeedge/pull/2230", + "sourceRepo": "kubeedge/kubeedge", + "reactions": 2, + "comments": 25 + }, + "security": { + "scannedAt": "2026-02-27T17:44:05.566Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubeedge/kubeedge-941-upgrade-to-kubernetes-v1-15.json b/solutions/cncf-generated/kubeedge/kubeedge-941-upgrade-to-kubernetes-v1-15.json new file mode 100644 index 00000000..73c84dfe --- /dev/null +++ b/solutions/cncf-generated/kubeedge/kubeedge-941-upgrade-to-kubernetes-v1-15.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:04.439Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubeedge: Upgrade to Kubernetes v1.15", + "description": "**What type of PR is this?**\n/kind feature\n\n**What this PR does / why we need it**:\n* Upgrade to Kubernetes v1.15\n* Upgrade to go v1.12\n\n**Which issue(s) this PR fixes**:\nFixes #806 \n\n**Special notes for your reviewer**:\n@subpathdev is working together with me.\nThanks @subpathdev for the great work.", + "type": "upgrade", + "status": "completed", + "resolution": 
{ + "summary": "**What type of PR is this?**\n/kind feature\n\n**What this PR does / why we need it**:\nusing gomod instead of gopkg\n\n**Which issue(s) this PR fixes**:\nFixes #748\n\n**Special notes for your reviewer**:\nthis pr based on #941 \n@edisonxiang is working together with me\nThanks @edisonxiang for the great work", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kubeedge", + "graduated", + "app-definition", + "kind-feature", + "lgtm", + "approved", + "size-xxl" + ], + "category": "workloads", + "cncfProjects": [ + "kubeedge" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/kubeedge/kubeedge/pull/941", + "sourceRepo": "kubeedge/kubeedge", + "reactions": 3, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:44:04.439Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubeelasti/kubeelasti-178-helm-forward-values-to-elasti-trough-env-variables.json b/solutions/cncf-generated/kubeelasti/kubeelasti-178-helm-forward-values-to-elasti-trough-env-variables.json new file mode 100644 index 00000000..c96769d5 --- /dev/null +++ b/solutions/cncf-generated/kubeelasti/kubeelasti-178-helm-forward-values-to-elasti-trough-env-variables.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:28.284Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubeelasti: helm: forward values to elasti trough env variables", + "description": "This allows operator to work in any namespace, not only in `elasti`.\nThis also allows forwarding values from helm chart to elasti pods.\n\nFixes #175 #182\n\nHad to update few third parties, as UTs & e2e didn't want to work (probably due to go1.25.1 or helm 3.18.6).", + "type": "deploy", + "status": "completed", + 
"resolution": { + "summary": "Hi @rethil, \n\nDo you think this issue: #182 is part of this scope? \nMaybe not namespace, but support any name too.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "**Applied to files:**\n- `operator/internal/controller/opsEndpointslices.go`\n\n
\n\n
\n🧬 Code graph analysis (8)\n\n
\noperator/internal/controller/opsDeployment.go (1)
\n\n
\npkg/config/config.go (1)\n\n* `GetResolverConfig` (43-54)\n\n
\n\n
\n
\nresolver/cmd/main.go (1)
\n\n
\npkg/config/config.go (1)\n\n* `GetResolverConfig` (43-54)\n\n
\n\n
\n
\noperator/cmd/main.go (1)
\n\n
\npkg/config/config.go (1)\n\n* `GetOperatorConfig` (57-64)\n\n
\n\n
\n
\noperator/internal/controller/opsInformer.go (3)
\n\n
\npkg/config/config.go (1)\n\n* `GetResolverConfig` (43-54)\n\n
\n
\noperator/internal/controller/elastiservice_controller.go (1)\n\n* `ElastiServiceReconciler` (31-40)\n\n
\n
\noperator/internal/informer/informer.go (1)\n\n* `KeyParams` (337-342)\n\n
\n\n
\n
\noperator/internal/controller/elastiservice_controller_test.go (1)
\n\n
\npkg/config/config.go (5)\n\n* `EnvResolverNamespace` (10-10)\n* `EnvResolverDeploymentName` (11-11)\n* `EnvResolverServiceName` (12-12)\n* `EnvResolverPort` (13-13)\n* `EnvResolverProxyPort` (14-14)\n\n
\n\n
\n
\noperator/internal/informer/informer.go (1)
\n\n
\npkg/config/config.go (1)\n\n* `GetResolverConfig` (43-54)\n\n
\n\n
\n
\noperator/internal/controller/opsEndpointslices.go (1)
\n\n
\npkg/config/config.go (1)\n\n* `GetResolverConfig` (43-54)\n\n
\n\n
\n
\nresolver/internal/operator/RPCClient.go (1)
\n\n
\npkg/config/config.go (2)\n\n* `GetOperatorConfig` (57-64)\n* `GetKubernetesClusterDomain` (38-40)\n\n
\n\n
\n\n
\n\n
\n\n
\n🔇 Additional comments (21)
\n\n
\noperator/Makefile (1)
\n\n`152-152`: **LGTM! Controller-tools version update aligns with PR scope.**\n\nThe bump from v0.14.0 to v0.19.0 is consistent with the CRD schema changes throughout the PR.\n\n
\n
\noperator/config/crd/bases/elasti.truefoundry.com_elastiservices.yaml (2)
\n\n`6-6`: **LGTM! Controller-gen version updated consistently.**\n\nThe version bump to v0.19.0 matches the Makefile change and other CRD updates in this PR.\n\n---\n\n`99-101`: **LGTM! Required fields alignment addresses validation gap.**\n\nBased on the past review discussion, these fields were already required at the code level but not enforced in the CRD schema. This change aligns the schema with the actual implementation requirements.\n\n
\n
\noperator/internal/controller/elastiservice_controller_test.go (2)
\n\n`25-25`: **LGTM! Test now uses centralized configuration.**\n\nThe import aligns with the PR's goal of centralizing configuration management.\n\n---\n\n`58-62`: **Add missing cluster domain environment variable.**\n\nThe test should set the cluster domain to prevent panics if reconciliation code begins reading `GetKubernetesClusterDomain()`.\n\n\nApply this diff to add the missing environment variable:", + "
\n
\nresolver/internal/operator/RPCClient.go (1)
\n\n`38-46`: **Make operator host FQDN-safe and set HTTP client timeout.**\n\n- Current code always appends “..svc.” which breaks if ServiceName is already FQDN.\n- http.Client has no timeout; network issues can hang indefinitely.\n\nApply this diff:" + ] + } + }, + "metadata": { + "tags": [ + "kubeelasti", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "kubeelasti" + ], + "targetResourceKinds": [ + "Pod", + "Service", + "Namespace" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/truefoundry/KubeElasti/pull/178", + "sourceRepo": "truefoundry/KubeElasti", + "reactions": 0, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:48:28.284Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubeflow/kubeflow-1750-openmpi-support-volumes-and-volumemounts.json b/solutions/cncf-generated/kubeflow/kubeflow-1750-openmpi-support-volumes-and-volumemounts.json new file mode 100644 index 00000000..61fb42d0 --- /dev/null +++ b/solutions/cncf-generated/kubeflow/kubeflow-1750-openmpi-support-volumes-and-volumemounts.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:48.905Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubeflow: [openmpi] support `volumes` and `volumeMounts`", + "description": "fixes #838 \n\nnow kubeflow support ks `0.11.0`, so no blocking issues to merge this feature.\n\nThis supports `volume` and `volumeMounts` parameter on `openmpi` package.\n\nThis enables users to mount arbitrary volumes to master and worker pods like below:\n\n```shell\nks param set ${COMPONENT} volumes '[{ \"name\": \"vol\", \"hostPath\": { \"path\": \"/mnt/vol\" }}]'\nks param set ${COMPONENT} volumeMounts '[{ \"name\": \"vol\", \"mountPath\": \"/mnt/vol\"}]'\n```\n\n---\nThis change is 
[\"Reviewable\"/](https://reviewable.io/reviews/kubeflow/kubeflow/1750)", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/area openmpi", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "ks param set ${COMPONENT} volumes '[{ \"name\": \"vol\", \"hostPath\": { \"path\": \"/mnt/vol\" }}]'\r\nks param set ${COMPONENT} volumeMounts '[{ \"name\": \"vol\", \"mountPath\": \"/mnt/vol\"}]'" + ] + } + }, + "metadata": { + "tags": [ + "kubeflow", + "incubating", + "app-definition", + "size-xs", + "lgtm", + "approved", + "area-openmpi", + "cla--yes" + ], + "category": "workloads", + "cncfProjects": [ + "kubeflow" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubeflow/kubeflow/pull/1750", + "sourceRepo": "kubeflow/kubeflow", + "reactions": 1, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:44:48.905Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubeflow/kubeflow-3515-refactor-kfctl-to-begin-address-a-number-of-issues-related-to-repo.json b/solutions/cncf-generated/kubeflow/kubeflow-3515-refactor-kfctl-to-begin-address-a-number-of-issues-related-to-repo.json new file mode 100644 index 00000000..95a1c71b --- /dev/null +++ b/solutions/cncf-generated/kubeflow/kubeflow-3515-refactor-kfctl-to-begin-address-a-number-of-issues-related-to-repo.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:47.960Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubeflow: Refactor kfctl to begin address a number of issues related to repo management", + "description": "* kfctl needs to support multiple repositories since the configuration is\n now split across kubeflow/kubeflow and kubeflow/manifests\n\n* We should use 
KfDef.Status to pass information through the kfctl libraries\n that is temporary such as keeping track of locations where code is cached.\n\n * Prior to this repo the fields Repo and ManifestsRepo were reset to\n point to the local cachedir. This means those links were no longer valid\n if the app was moved to a different folder or machine\n\n * With this PR we add Spec.Repos to keep track of source of truth\n and Status.ReposCache to store information about the local cache\n\n * This way the app can always rebuild the cache if needed.\n\n* Use hashicorp's go-getter to provide a consistent syntax for downloading the\n repos. go-getter supports fetching URIs and specifying lots of variants (e.g pull) no need to invent our own syntax.\n\n* Define a SyncCache method on KfDef that will sync multiple repositories.\n\n* Refactor NewKfApp; split", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/assign @yanniszark @kunmingg", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "$ ☞ kfctl init dls-kf --config https://raw.githubusercontent.com/jlewi/kubeflow/kfctl_kfdef/bootstrap/config/kfctl_gcp_iap.0.6.yaml\r\n$ ☞ cd dls-kf\r\n$ ☞ kfctl generate all --zone=us-west1-a\r\nError: couldn't generate KfApp: (kubeflow.error): Code 500 with message: coordinator Generate failed for gcp: (kubeflow.error): Code 400 with message: could not generate deployment manager configs under gcp_config Error: could not copy deployment/gke/deployment_manager_configs/cluster.jinja to /Users/kdkasrav/dls-kf/gcp_config/cluster.jinja Error cannot create directory: open deployment/gke/deployment_manager_configs/cluster.jinja: no such file or directory\r\nUsage:\r\n kfctl generate [all(=default)|k8s|platform] [flags]\r\n\r\nFlags:\r\n --email string email if '--platform gcp'\r\n -h, --help help for generate\r\n --hostname string hostname if '--platform gcp'\r\n --ipName string ipName if 
'--platform gcp'\r\n --mount-local mount-local if '--platform minikube'\r\n -V, --verbose verbose output default is false\r\n --zone string zone if '--platform gcp'\r\n\r\ncouldn't generate KfApp: (kubeflow.error): Code 500 with message: coordinator Generate failed for gcp: (kubeflow.error): Code 400 with message: could not generate deployment manager configs under gcp_config Error: could not copy deployment/gke/deployment_manager_configs/cluster.jinja to /Users/kdkasrav/dls-kf/gcp_config/cluster.jinja Error cannot create directory: open deployment/gke/deployment_manager_configs/cluster.jinja: no such file or directory", + "couldn't generate KfApp: (kubeflow.error): Code 500 with message: coordinator Generate failed for gcp: (kubeflow.error): Code 400 with message: could not generate deployment manager configs under gcp_config Error: could not copy deployment/gke/deployment_manager_configs/cluster.jinja to /Users/kdkasrav/dls-kf/gcp_config/cluster.jinja Error cannot create directory: open deployment/gke/deployment_manager_configs/cluster.jinja: no such file or directory", + "util.py 71 INFO fatal: No names found, cannot describe anything.\r\nutil.py 71 INFO go build -i -gcflags 'all=-N -l' -ldflags \"-X main.VERSION=\" -o bin/kfctl cmd/kfctl/main.go\r\nutil.py 45 INFO Running: /mnt/test-data-volume/kubeflow-presubmit-kfctl-go-iap-istio-3515-83467d3-2962-3f57/src/kubeflow/kubeflow/\r\nbootstrap/bin/kfctl init /mnt/test-data-volume/kubeflow-presubmit-kfctl-go-iap-istio-3515-83467d3-2962-3f57/kfctl-3f57 -V --platform=gcp --version=pull/35\r\n15 --package-manager=kustomize@0caa70b4518859c0678d0d0e12c11a7e35345c5a --skip-init-gcp-project --disable_usage_report --project=kubeflow-ci-deployment --\r\nuse_istio" + ] + } + }, + "metadata": { + "tags": [ + "kubeflow", + "incubating", + "app-definition", + "size-xxl", + "lgtm", + "approved", + "cla--yes" + ], + "category": "workloads", + "cncfProjects": [ + "kubeflow" + ], + "targetResourceKinds": [], + "difficulty": 
"intermediate", + "sourceIssue": "https://github.com/kubeflow/kubeflow/pull/3515", + "sourceRepo": "kubeflow/kubeflow", + "reactions": 1, + "comments": 27 + }, + "security": { + "scannedAt": "2026-02-27T17:44:47.960Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubeflow/kubeflow-5118-fix-4842-pod-resources-limit.json b/solutions/cncf-generated/kubeflow/kubeflow-5118-fix-4842-pod-resources-limit.json new file mode 100644 index 00000000..3d376b87 --- /dev/null +++ b/solutions/cncf-generated/kubeflow/kubeflow-5118-fix-4842-pod-resources-limit.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:45.605Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubeflow: Fix #4842 Pod resources limit", + "description": "Fix #4842 - issue was closed earlier, but not solved within Kubeflow repo by author\n\nSets CPU and memory limits of Notebooks equal to requests. 
This prevents\n- out of memory problems due to over-provisioning\n- users using more resources than they should", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/assign @kimwnasptd", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kubeflow", + "incubating", + "app-definition", + "needs-ok-to-test", + "size-xs", + "lgtm", + "approved", + "cla--yes" + ], + "category": "workloads", + "cncfProjects": [ + "kubeflow" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubeflow/kubeflow/pull/5118", + "sourceRepo": "kubeflow/kubeflow", + "reactions": 5, + "comments": 26 + }, + "security": { + "scannedAt": "2026-02-27T17:44:45.605Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubeflow/kubeflow-5237-add-support-for-tolerations-and-affinity-in-notebooks.json b/solutions/cncf-generated/kubeflow/kubeflow-5237-add-support-for-tolerations-and-affinity-in-notebooks.json new file mode 100644 index 00000000..da17ee0a --- /dev/null +++ b/solutions/cncf-generated/kubeflow/kubeflow-5237-add-support-for-tolerations-and-affinity-in-notebooks.json @@ -0,0 +1,55 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:44.575Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubeflow: Add support for Tolerations and Affinity in Notebooks", + "description": "This PR adds support for toleration and affinity configs in the Notebook Spawner UI.\n\nResolves: #4433\n\nOptions presented to the user are specified inside `spawner_ui_config.yaml`. 
This example config allows users to ask for exclusive access to a node within node-pool called `notebook-n1-standard-2`:\n```yaml\nspawnerFormDefaults:\n ...\n affinityConfig:\n # The default `configKey` from the options list\n # If readonly, the default value will be the only option\n value: \"none\"\n # The list of available affinity configs\n options:\n - configKey: \"none\"\n displayName: \"None\"\n affinity: {}\n # (DESC) Pod gets an exclusive \"n1-standard-2\" Node\n # (TIP) set PreferNoSchedule taint on this node-pool\n # (TIP) enable cluster-autoscaler on this node-pool\n # (TIP) dont let users request more CPU/MEMORY than the size of this node\n - configKey: \"exclusive__n1-standard-2\"\n displayName: \"Exclusive: n1-standard-2\"\n affinity:\n # (Re", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Fix #4842 - issue was closed earlier, but not solved within Kubeflow repo by author\n\nSets CPU and memory limits of Notebooks equal to requests. This prevents\n- out of memory problems due to over-provisioning\n- users using more resources than they should", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "spawnerFormDefaults:\r\n ...\r\n affinityConfig:\r\n # The default `configKey` from the options list\r\n # If readonly, the default value will be the only option\r\n value: \"none\"\r\n # The list of available affinity configs\r\n options:\r\n - configKey: \"none\"\r\n displayName: \"None\"\r\n affinity: {}\r\n # (DESC) Pod gets an exclusive \"n1-standard-2\" Node\r\n # (TIP) set PreferNoSchedule taint on this node-pool\r\n # (TIP) enable cluster-autoscaler on this node-pool\r\n # (TIP) dont let users request more CPU/MEMORY than the size of this node\r\n - configKey: \"exclusive__n1-standard-2\"\r\n displayName: \"Exclusive: n1-standard-2\"\r\n affinity:\r\n # (Require) Node having label: `node_pool=notebook-n1-standard-2`\r\n 
nodeAffinity:\r\n requiredDuringSchedulingIgnoredDuringExecution:\r\n nodeSelectorTerms:\r\n - matchExpressions:\r\n - key: \"node_pool\"\r\n operator: \"In\"\r\n values:\r\n - \"notebook-n1-standard-2\"\r\n # (Require) Node WITHOUT existing Pod having label: `notebook-name`\r\n podAntiAffinity:\r\n requiredDuringSchedulingIgnoredDuringExecution:\r\n - labelSelector:\r\n matchExpressions:\r\n - key: \"notebook-name\"\r\n operator: \"Exists\"\r\n namespaces: []\r\n topologyKey: \"kubernetes.io/hostname\"\r\n readOnly: false\r\n tolerationGroup:\r\n # The default `groupKey` from the options list\r\n # If readonly, the default value will be the only option\r\n value: \"none\"\r\n # The list of available tolerationGroup configs\r\n options:\r\n - groupKey: \"none\"\r\n displayName: \"None\"\r\n tolerations: []\r\n - groupKey: \"group_1\"\r\n displayName: \"Group 1: description\"\r\n tolerations:\r\n - key: \"key1\"\r\n operator: \"Equal\"\r\n value: \"value1\"\r\n effect: \"NoSchedule\"\r\n - key: \"key2\"\r\n operator: \"Equal\"\r\n value: \"value2\"\r\n effect: \"NoSchedule\"\r\n readOnly: false", + "apiVersion: kustomize.config.k8s.io/v1beta1\r\nkind: Kustomization\r\nresources:\r\n - XXXXX\r\n# ----------------\r\n# ↓ our changes ↓\r\n# ----------------\r\nimages:\r\n - name: gcr.io/kubeflow-images-public/jupyter-web-app\r\n newName: gcr.io/kubeflow-images-public/jupyter-web-app\r\n newTag: vmaster-ge4456300" + ] + } + }, + "metadata": { + "tags": [ + "kubeflow", + "incubating", + "app-definition", + "size-l", + "lgtm", + "approved", + "cla--yes", + "ok-to-test" + ], + "category": "workloads", + "cncfProjects": [ + "kubeflow" + ], + "targetResourceKinds": [ + "Pod", + "Namespace", + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubeflow/kubeflow/pull/5237", + "sourceRepo": "kubeflow/kubeflow", + "reactions": 7, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:44:44.575Z", + "scannerVersion": "cncf-gen-1.0.0", + 
"sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubeflow/kubeflow-7622-fix-notebook-server-images-with-non-root-securitycontext.json b/solutions/cncf-generated/kubeflow/kubeflow-7622-fix-notebook-server-images-with-non-root-securitycontext.json new file mode 100644 index 00000000..c98c2dcf --- /dev/null +++ b/solutions/cncf-generated/kubeflow/kubeflow-7622-fix-notebook-server-images-with-non-root-securitycontext.json @@ -0,0 +1,58 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:46.736Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubeflow: fix: notebook server images with non-root SecurityContext", + "description": "resolves https://github.com/kubeflow/kubeflow/issues/5808\n\n# What does this PR do?\n\nThis PR makes the following changes to the `example-notebook-servers`:\n\n- updates the version of [`s6-overlay`](https://github.com/just-containers/s6-overlay) to [`v3.2.0.0`](https://github.com/just-containers/s6-overlay/releases/tag/v3.2.0.0)\n- changes the primary GID of the `jovyan` user from `100` to `0`: \n - for backwards-compatibility, `jovyan` is still a member of `100`\n- fixed the fact that the IDEs could fail to start, but the container would stay running:\n - sets `S6_BEHAVIOUR_IF_STAGE2_FAILS` to `2` \n- fixed the fact that the STDERR of the IDEs was not being captured in the Pod logs:\n - runs `exec 2>&1` in the s6 `run` scripts\n- fixed the fact that RStudio would not print its logs to the container:\n - set `RS_LOGGER_TYPE` to `stderr`\n- removed the `finish` s6 script for RStudio, as this is no longer needed:\n - RStudio now catches the TERM signal and gracefully saves user work.\n-", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@kimwnasptd could you please take a look at this, because I would like to get it in for 1.9.0?\n\nIt's fairly straightforward, but very important as it allows Notebook 
to have strict `securityContext` settings without failing.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "What I mean by a \"typical\" container `securityContext` is one that drops all permissions prevents privilege escalation:", + "For example, here is a Notebook which has this error that uses the `kubeflownotebookswg/jupyter-scipy:v1.9.0-rc.1` image:", + "[restricted-v2.yaml.txt](https://github.com/user-attachments/files/16053326/restricted-v2.yaml.txt)" + ] + } + }, + "metadata": { + "tags": [ + "kubeflow", + "incubating", + "app-definition", + "size-l", + "lgtm", + "approved" + ], + "category": "workloads", + "cncfProjects": [ + "kubeflow" + ], + "targetResourceKinds": [ + "Pod", + "Deployment", + "Service", + "Namespace", + "Persistentvolumeclaim", + "Persistentvolume", + "Serviceaccount" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/kubeflow/kubeflow/pull/7622", + "sourceRepo": "kubeflow/kubeflow", + "reactions": 2, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:44:46.736Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kuberhealthy/kuberhealthy-537-fix-goroutine-leak-in-watchforcheckerpodshutdown.json b/solutions/cncf-generated/kuberhealthy/kuberhealthy-537-fix-goroutine-leak-in-watchforcheckerpodshutdown.json new file mode 100644 index 00000000..bfd62642 --- /dev/null +++ b/solutions/cncf-generated/kuberhealthy/kuberhealthy-537-fix-goroutine-leak-in-watchforcheckerpodshutdown.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:32.198Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kuberhealthy: fix goroutine leak in watchForCheckerPodShutdown()", + "description": "Fixes #533\n\nI fixed the leak on a heavily modified branch wit logging of goroutine ids 
including who creates which goroutine etc. This is the change which finally fixed the leak. I'm currently verifying that this version without all other changes also fixes the leak.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Should be a more inclusive fix for #537", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kuberhealthy", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "kuberhealthy" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kuberhealthy/kuberhealthy/pull/537", + "sourceRepo": "kuberhealthy/kuberhealthy", + "reactions": 2, + "comments": 16 + }, + "security": { + "scannedAt": "2026-02-27T17:48:32.198Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-102819-add-o-extra-columns-format-option-to-kubectl-get.json b/solutions/cncf-generated/kubernetes/kubernetes-102819-add-o-extra-columns-format-option-to-kubectl-get.json new file mode 100644 index 00000000..285dab4a --- /dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-102819-add-o-extra-columns-format-option-to-kubectl-get.json @@ -0,0 +1,59 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:24.956Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubernetes: Add `-o extra-columns` format option to kubectl get", + "description": "Fixes https://github.com/kubernetes/kubernetes/issues/98368\n\nNote to reviewers: This PR is in draft while I get more feedback on if this desired and/or find more time to clean it up. 
Before merge, I would expect to clean up the logic a bit (notable - remove lazy error handling panics), and add tests.\n\nExamples:\n```\n$ ~/go/bin/kubectl get pod -o \"extra-columns=NAME:.metadata.name,IP:.status.podIP,IMAGE:.spec.containers[*].image\"\nNAME AGE NAME IP IMAGE\nnoproxy-688f47dc9-vxpbw 74m noproxy-688f47dc9-vxpbw 10.244.0.106 howardjohn/alpine-shell\nshell-7854df9c5-dc6ml 74m shell-7854df9c5-dc6ml 10.244.0.105 howardjohn/alpine-shell,gcr.io/istio-testing/proxyv2:latest\n$ ~/go/bin/kubectl get svc -o \"extra-columns=NAME:.spec.ports[*].name,PORT:.spec.ports[*].port,TARGET:.spec.ports[*].targetPort\"\nNAME AGE NAME PORT TARGET\nawake 6h13m http 80 80\nkubernetes 6h13m https 443 6443\nshell ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/ok-to-test\n/assign @eddiezane @soltysh", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "$ ~/go/bin/kubectl get pod -o \"extra-columns=NAME:.metadata.name,IP:.status.podIP,IMAGE:.spec.containers[*].image\"\r\nNAME AGE NAME IP IMAGE\r\nnoproxy-688f47dc9-vxpbw 74m noproxy-688f47dc9-vxpbw 10.244.0.106 howardjohn/alpine-shell\r\nshell-7854df9c5-dc6ml 74m shell-7854df9c5-dc6ml 10.244.0.105 howardjohn/alpine-shell,gcr.io/istio-testing/proxyv2:latest\r\n$ ~/go/bin/kubectl get svc -o \"extra-columns=NAME:.spec.ports[*].name,PORT:.spec.ports[*].port,TARGET:.spec.ports[*].targetPort\"\r\nNAME AGE NAME PORT TARGET\r\nawake 6h13m http 80 80\r\nkubernetes 6h13m https 443 6443\r\nshell 75m http 9087 9087\r\nsleep 6h13m http 80 80", + "#### Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.:", + "@howardjohn: This issue is currently awaiting triage.\n\nIf a SIG or subproject determines this is a relevant issue, they will accept it by applying the `triage/accepted` label and provide further guidance.\n\nThe `triage/accepted` label can be added by org members by writing 
`/triage accepted` in a comment.\n\n\n
\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n
\nHi @howardjohn. Thanks for your PR.\n\nI'm waiting for a [kubernetes](https://github.com/orgs/kubernetes/people) member to verify that this patch is reasonable to test. If it is, they should reply with `/ok-to-test` on its own line. Until that is done, I will not automatically test new commits in this PR, but the usual testing commands by org members will still work. Regular contributors should [join the org](https://git.k8s.io/community/community-membership.md#member) to skip this step.\n\nOnce the patch is verified, the new status will be reflected by the `ok-to-test` label.\n\nI understand the commands that are listed [here](https://go.k8s.io/bot-commands?repo=kubernetes%2Fkubernetes).\n\n
\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n
\n\n[APPROVALNOTIFIER] This PR is **NOT APPROVED**\n\nThis pull-request has been approved by: *howardjohn*\nTo complete the [pull request process](https://git.k8s.io/community/contributors/guide/owners.md#the-code-review-process), please assign **deads2k** after the PR has been reviewed.\nYou can assign the PR to them by writing `/assign @deads2k` in a comment when ready.\n\nThe full list of commands accepted by this bot can be found [here](https://go.k8s.io/bot-commands?repo=kubernetes%2Fkubernetes).\n\n
\nNeeds approval from an approver in each of these files:\n\n- **[staging/src/k8s.io/cli-runtime/OWNERS](https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/cli-runtime/OWNERS)**\n- **[staging/src/k8s.io/kubectl/OWNERS](https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubectl/OWNERS)**\n\nApprovers can indicate their approval by writing `/approve` in a comment\nApprovers can cancel approval by writing `/approve cancel` in a comment\n
\n\n/ok-to-test\r\n/assign @eddiezane @soltysh \r\n\n/test pull-kubernetes-integration\r\nfor a flake. See #103472\r\n\nAny feedback on this? It has been open for 6 weeks without comments :slightly_smiling_face: \nAny feedback on this?\nSorry for the delay!\r\n\r\nThis makes sense to me and has community want.\r\n\r\nIt seems there are columns missing from the original output though?" + ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "area-kubectl", + "release-note", + "needs-rebase", + "size-l", + "kind-feature", + "sig-cli", + "cncf-cla--yes", + "ok-to-test", + "needs-priority", + "needs-triage" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubernetes/kubernetes/pull/102819", + "sourceRepo": "kubernetes/kubernetes", + "reactions": 37, + "comments": 22 + }, + "security": { + "scannedAt": "2026-02-27T17:44:24.957Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-102884-in-place-pod-vertical-scaling-feature.json b/solutions/cncf-generated/kubernetes/kubernetes-102884-in-place-pod-vertical-scaling-feature.json new file mode 100644 index 00000000..896c6316 --- /dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-102884-in-place-pod-vertical-scaling-feature.json @@ -0,0 +1,62 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:13.778Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubernetes: In-place Pod Vertical Scaling feature", + "description": "#### What type of PR is this?\n/kind feature\n/kind api-change\n\n#### What this PR does / why we need it:\nThis PR brings the following changes that **mostly** implement In-place Pod Vertical Scaling feature:\n1. 
API change for [In-place Pod Vertical Scaling](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/1287-in-place-update-pod-resources) feature\n2. Implementation of CRI API changes to support In-Place Pod Vertical Scaling.\n3. Core implementation that enables In-place vertical scaling for pods, comprehensively tested with docker runtime.\n4. Comprehensive E2E tests to validate In-place pod vertical scaling feature.\n\n#### Which issue(s) this PR fixes: #9043 #110490 \n\nxref https://github.com/kubernetes/enhancements/issues/1287\n\n#### Special notes for your reviewer: \n\nAPI changes: See: https://github.com/kubernetes/kubernetes/pull/111946\n\nScheduler changes: See\nhttps://github.com/kubernetes/kubernetes/pull/102884/commits/231849a90853363900391aaa3f406867c8421489\nhttps://", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/hold", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "#### Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.:\r\n\r\n", + "**Jun 26th:**\r\nPodStatus.Resize has now been fully implemented. @thockin Please see below. I hope this cuts as as simple signal to the API user (VPA) as to what's going on with resize, so they may choose to take alternative action in the Deferred / Infeasible cases as allowed by their policy.", + "> Your PR comment says \"fixes\" for the KEP issues, but they are not actually fixed until the gate is finally removed. Please change those to \"xref\" or \"re\" or something non-magical.\r\n> \r\nChanged it to xref.\r\n\r\n> I'll do what I can to look at the code, specifically focusing on API stuff. I have to throw out a caveat: There are things I am not super familiar with, like adding subresources, which may need more eyeballs.\r\n\r\nsubresource is not in scope for alpha, we deferred it to beta. 
And we have time as we are looking at GA at-least n+2 version after alpha so that we can deal with n-2 version compatibility requirement by not dealing with it :)\r\n\r\n> \r\n> Thanks for such complete notes! It's awesome.\r\n> \r\n> The problem with it is that I have a hard time seeing what to pay attention to in each step. I know it's asking a lot, but it would help if you just removed the irrelevant parts. E.g.\r\n> \r\nDone - got rid of unnecessary stuff to focus on things that vary during resize\n@Random-Liu Please see change https://github.com/kubernetes/kubernetes/pull/102884/commits/e55169bac1ae354607df2b719a5235b87643d0b5 , it address most of your review feedback besides the TODOs and those tracked in https://github.com/kubernetes/enhancements/issues/1287 which need further discussion to address before Beta.\n/retest\n/test pull-kubernetes-e2e-gce-alpha-features\n/hold\r\nAPI review changes coming.\n/unhold\n/retest\n/retest\r\n\r\nIntegration test passes on my local machine.." 
+ ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "area-test", + "priority-important-soon", + "area-kubelet", + "sig-scheduling", + "area-apiserver", + "lgtm", + "sig-node", + "sig-api-machinery", + "release-note", + "size-xxl" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [ + "Pod", + "Namespace", + "Role", + "Node" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/kubernetes/kubernetes/pull/102884", + "sourceRepo": "kubernetes/kubernetes", + "reactions": 124, + "comments": 154 + }, + "security": { + "scannedAt": "2026-02-27T17:44:13.778Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-102889-wip-add-cronjob-timezone.json b/solutions/cncf-generated/kubernetes/kubernetes-102889-wip-add-cronjob-timezone.json new file mode 100644 index 00000000..f7a35f24 --- /dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-102889-wip-add-cronjob-timezone.json @@ -0,0 +1,58 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:34.536Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubernetes: WIP: Add cronjob timezone", + "description": "#### What type of PR is this?\n\n/kind feature\n/kind api-change\n\n#### What this PR does / why we need it:\n\nThis PR adds a new field to the `CronJobSpec` named `TimeZone` that allows users to choose the specific time zone to use when scheduling jobs. If not specified, the previous default behavior of using the local time for the `kube-controller-manager` is preserved.\n\nThe ability to specify time zones is useful for Kubernetes users in situations where application requirements necessitate running jobs in specific time zones. 
In situations where users don't have control over the control plane local time, this also allows users to choose a time zone that is consistent and known to them.\n\n#### Which issue(s) this PR fixes:\n\nFixes #47202\n\n#### Special notes for your reviewer:\n\n#### Does this PR introduce a user-facing change?\n\n```release-note\nAdded the ability to specify TimeZone as part of the CronJobSpec. \nThis allows the end-user to specify the time zone to be used when determining when to", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "cc @soltysh @alaypatel07", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "#### Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.:\r\n\r\n" + ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "release-note", + "needs-rebase", + "size-l", + "kind-api-change", + "kind-feature", + "sig-apps", + "cncf-cla--yes", + "needs-ok-to-test", + "do-not-merge-work-in-progress", + "do-not-merge-hold" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [ + "Job", + "Cronjob" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubernetes/kubernetes/pull/102889", + "sourceRepo": "kubernetes/kubernetes", + "reactions": 23, + "comments": 24 + }, + "security": { + "scannedAt": "2026-02-27T17:44:34.536Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-42873-add-kubectl-api-resources-command.json b/solutions/cncf-generated/kubernetes/kubernetes-42873-add-kubectl-api-resources-command.json new file mode 100644 index 00000000..514eb7bd --- /dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-42873-add-kubectl-api-resources-command.json @@ -0,0 +1,63 @@ +{ + "format": "kc-mission-v1", + 
"exportedAt": "2026-02-27T17:44:32.469Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubernetes: add kubectl api-resources command", + "description": "**What this PR does / why we need it**:\nAs the RBAC role need to be related to resources. I think we can use the command to get the supported resources. \n\n```\n# ./cluster/kubectl.sh api-resources \nNAME SHORTNAMES APIGROUP NAMESPACED KIND\nbindings true Binding\ncomponentstatuses cs false ComponentStatus\nconfigmaps cm true ConfigMap\nendpoints ep true Endpoints\nevents ev true Event\nlimitranges limits true LimitRange\nnamespaces ns ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "you would also need to know the API group\n\nshowing a resource that allows no verbs is debatable", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Hi @xilabao. Thanks for your PR.\n\nI'm waiting for a [kubernetes](https://github.com/orgs/kubernetes/people) member to verify that this patch is reasonable to test. If it is, they should reply with `@k8s-bot ok to test` on its own line. Until that is done, I will not automatically test new commits in this PR, but the usual testing commands by org members will still work. Regular contributors should join the org to skip this step.\n\n
\n\nInstructions for interacting with me using PR comments are available [here](https://github.com/kubernetes/community/blob/master/contributors/devel/pull-request-commands.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository. I understand the commands that are listed [here](https://github.com/kubernetes/test-infra/blob/master/prow/commands.md).\n
\n\n\n\n\nThis change is [\"Reviewable\"/](https://reviewable.kubernetes.io/reviews/kubernetes/kubernetes/42873)\n\n\ncc @kubernetes/sig-cli-feature-requests @kubernetes/sig-cli-pr-reviews @deads2k @smarterclayton \r\n\r\n@xilabao is there a proposal or issue related to this?\n> @xilabao is there a proposal or issue related to this?\r\n\r\nI have just added. https://github.com/kubernetes/kubernetes/issues/42932\nyou would also need to know the API group\r\n\r\nshowing a resource that allows no verbs is debatable\nPlease also take aggregated api-servers into consideration @xilabao \nIf you use discovery data, aggregated servers and TPR data will automatically work\n@liggitt I will updated it. But I have a question. which should we add, api servers or api group? \r\nanother question @shiywang mentioned. should we also support other format of output like json, yaml ?\nAPI group\nI would not expect someone to use this as an API… if they want that, they should use the actual discovery API", + "This is exactly what I expect, we should figure out the real use case of this command.\n@all PTAL\n@fabianofranz @liggitt @AdoHe @shiywang PTAL\n/assign @adohe\nping @fabianofranz @adohe \n@k8s-bot ok to test\n@k8s-bot verify test this\r\n@k8s-bot cvm gce e2e test this\nNeeds tests for test-cmd\nfixed. @fabianofranz @adohe \nping @fabianofranz @adohe\n/lgtm\n/approve\n@k8s-bot bazel test this\r\n@k8s-bot gce etcd3 e2e test this\r\n@k8s-bot kubemark e2e test this\r\n@k8s-bot unit test this\r\n@k8s-bot kops aws e2e test this\r\n@k8s-bot verify test this\n@fabianofranz @adohe \n/release-note\nadd three flags. 
@fabianofranz @adohe @liggitt @janetkuo PTAL" + ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "lgtm", + "release-note", + "size-l", + "kind-feature", + "approved", + "sig-cli", + "cncf-cla--yes", + "priority-important-longterm" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [ + "Pod", + "Deployment", + "Service", + "Ingress", + "Configmap", + "Secret", + "Statefulset", + "Daemonset" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubernetes/kubernetes/pull/42873", + "sourceRepo": "kubernetes/kubernetes", + "reactions": 24, + "comments": 60 + }, + "security": { + "scannedAt": "2026-02-27T17:44:32.469Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-46517-port-forward-listen-on-address.json b/solutions/cncf-generated/kubernetes/kubernetes-46517-port-forward-listen-on-address.json new file mode 100644 index 00000000..cab154ce --- /dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-46517-port-forward-listen-on-address.json @@ -0,0 +1,56 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:15.688Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubernetes: port-forward listen on address", + "description": "**What this PR does / why we need it**:\n\nImplements #43962 proposal. Adds `--address` flag to port-forward command that allows listening on addresses other then localhost, so that port-forward can ie. 
be opened to consumers other then residing in local host like running in docker or different machine/vm\n\n**Which issue this PR fixes**: \n\nfixes #43962, fixes #36152, fixes #29678\n\n**Release note**:\n```\nallows selecting non-localhost addresses to listen on with port-forward\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@goblain Are you still working on this? If so, please rebase. I can help review this.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "allows selecting non-localhost addresses to listen on with port-forward", + "$ kubectl config set-context minikube\r\n$ kubectl proxy --accept-hosts='.*' --address='0.0.0.0'", + "$ kubectl config set-cluster machineA --server=http://machineA.corporate.domain:8001 --insecure-skip-tls-verify=true\r\n$ kubectl config set-context machineA-context --cluster=machineA\r\n$ kubectl config use-context machineA-context\r\n$ kubectl --namespace=kube-system port-forward nginx-ingress-controller-2wxt6 8080" + ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "area-kubectl", + "lgtm", + "sig-api-machinery", + "release-note", + "size-l", + "approved", + "sig-cli", + "cncf-cla--yes", + "sig-testing" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubernetes/kubernetes/pull/46517", + "sourceRepo": "kubernetes/kubernetes", + "reactions": 87, + "comments": 48 + }, + "security": { + "scannedAt": "2026-02-27T17:44:15.688Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-63641-add-size-limit-to-tmpfs.json b/solutions/cncf-generated/kubernetes/kubernetes-63641-add-size-limit-to-tmpfs.json new file mode 100644 index 00000000..5fb94715 --- 
/dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-63641-add-size-limit-to-tmpfs.json @@ -0,0 +1,56 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:21.281Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubernetes: Add size limit to tmpfs", + "description": "**What this PR does / why we need it**:\nAdd a size option when mount tmpfs \n**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\nFixes #63126\n\n**Special notes for your reviewer**:\ni run a pod like below \n```\napiVersion: v1\nkind: Pod\nmetadata:\n name: busybox-1\n namespace: default\nspec:\n containers:\n - command:\n - sleep\n - \"360000\"\n image: busybox\n imagePullPolicy: IfNotPresent\n name: busybox\n resources: {}\n terminationMessagePath: /dev/termination-log\n terminationMessagePolicy: File\n volumeMounts:\n - name: foo\n mountPath: /data/mysql\n resources:\n limits:\n memory: 1229Mi\n requests:\n cpu: 500m\n memory: 1Gi\n volumes:\n - name: foo\n emptyDir:\n sizeLimit: \"350Mi\"\n medium: \"Memory\"\n dnsPolicy: ClusterFirst\n restartPolicy: Always\n schedulerName: default-scheduler\n securityContext: {}\n terminat", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/assign @jingxu97", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "apiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n name: busybox-1\r\n namespace: default\r\nspec:\r\n containers:\r\n - command:\r\n - sleep\r\n - \"360000\"\r\n image: busybox\r\n imagePullPolicy: IfNotPresent\r\n name: busybox\r\n resources: {}\r\n terminationMessagePath: /dev/termination-log\r\n terminationMessagePolicy: File\r\n volumeMounts:\r\n - name: foo\r\n mountPath: /data/mysql\r\n resources:\r\n limits:\r\n memory: 1229Mi\r\n requests:\r\n cpu: 500m\r\n memory: 1Gi\r\n 
volumes:\r\n - name: foo\r\n emptyDir:\r\n sizeLimit: \"350Mi\"\r\n medium: \"Memory\"\r\n dnsPolicy: ClusterFirst\r\n restartPolicy: Always\r\n schedulerName: default-scheduler\r\n securityContext: {}\r\n terminationGracePeriodSeconds: 30", + "/assign @thockin\r\ncc @jingxu97 \n/ok-to-test\n/retest\n/assign @jingxu97 \n/retest \n/retest\n/lgtm\nPlease add a release note\n@dims added the release note and may be you have some better describe for the release note?\nHow about this?\r\n\r\n`Fixed emptyDir to use the sizeLimit when creating the temporary volumes. Before this fix the tmpfs volume size was set to half of the available RAM (default linux kernel behavior)`\nWe need to carefully consider and document the behavior implications of this. The emptyDir sizeLimit for non-tmpfs volumes is a soft limit, ie pods get evicted vs IO error in the app. But this change makes it a hard limit for tmpfs. So users are going to see different behavior of sizeLimit depending on the emptydir type, and that could be confusing.\r\n\r\nI think this same difference exists when handing limits for cpu/memory, so it could just be a matter of good documentation.\n@msau42 ok i will update the document about tmpdir in the website \nI want to double check the use case of this PR. Currently, for emptyDir backed by memory, if the usage exceeds limit, the pod will be gracefully terminated and rescheduled. After this PR, the memory is a hard limit, if the emptyDir usage exceeds the limit, applications might get errors instead of evicted. Is this the desired behavior in what use cases?\nI hate to be the naysayer, but the way this is presented through the API implies that it has meaning regardless of `medium`. But it doesn't. It is totally ignored for both `medium: \"\"` and `medium: HugePages`. 
We either need to:\r\n\r\na) Implement it for those (not sure how to do it for `\"\"` except by something like grpquota which requires a supplemental GID to be added per volume)\r\n\r\nb) Error on validation (ugly)\r\n\r\nc) Change the API so it applies to just memory. e.g.", + "I prefer (c), unless we can make (a) work.\r\n\r\nJing's concern is legit - probably should validate that during API validation.\r\n\r\nWe must also document that this comes out of the pod's overall memory limit (right?).\r\n\r\nAt some point there was discussion about how we might limit this in alignment with the ephemeral storage proposals - @jingxu97 ?\nSorry, some of what I said was already merged, and I just forgot :)\r\n\r\nSome of it is still valid.\nthis requires some special review - especially for when the /tmpfs is reclaimed in pod lifecycle. ultimately usage is capped by memory cgroup. like tim, i need some time to reflect on impact of this.\r\n\n@jingxu97 i think we should discuss this some in sig-node meeting this week.\nFWIW I think it should be fixed, but we need to make sure we understand HOW\nand WHY :)\n\nOn Mon, May 14, 2018 at 3:27 PM Derek Carr wrote:\n\n> @jingxu97 i think we should discuss this\n> some in sig-node meeting this week.\n>\n> —\n> You are receiving this because you were assigned.\n> Reply to this email directly, view it on GitHub\n> ,\n> or mute the thread\n> \n> .\n>\n\n@derekwaynecarr sure, we can discuss more in sig-node. But I was in f2f meeting this Tuesday. Is that ok for next sig-node meeting? Thanks!\nany update for this ?\n/unassign\nJust my $0.02 as a user but I find it _very_ surprising that, when I violate a size limit, instead of getting an error that my code can handle I just get my container shut down (albeit gracefully.) 
It's definitely very important to ruminate on the impact this change could cause, and possibly change the API so people don't get suddenly surprised by the change, but you should definitely let users actually know that they violated the size limit when they do, so they can work around it rather than just getting evicted and then violating the limit on another node, etc. etc. ;)\nIf memory limits for tmpfs becomes an explicit config knob, what would the overall pod memory limit be? Imagine a pod with tmpfs limit of `2Gi`, and a container with memory limit `1Gi`, then will the pod limit be `3Gi`? The container will still get OOM killed if it writes more than `1Gi` to it's tmpfs. If the pod limit is `1Gi` then the tmpfs limit doesn't make any sense except in the case of container restarts (which is a corner case).\r\n\r\nFor this PR to make sense, we need kernel support to charge tmpfs usage to the pod level instead of container/first-touch level. \r\n\nI think having a tmpfs volume contribute to the same limit as pod memory is what makes it hard to use.\r\n\r\nIdeally you would have a limit on the tmpfs which would give you out of space errors if you try to write more data than the limit, but it would not contribute to the memory limit set on the containers in the pod. Obviously you need to take all limits (or requests really) into account when scheduling the pod.\r\nThe fact that tmpfs is using memory is just an implementation detail in my mind, it's still a volume and other volume types are treated as a separate resource from the pod (for obvious reasons), but this makes it easy to reason about how much you can write to your volume vs. how much memory your process can consume and you don't have to worry that these concerns/limits might overlap.\r\n\r\nWhether it's feasible to implement it like this with the way pods are implemented, I'm not sure?\n@vishh I think what you describe won't cause oom ,It just will be a disk full error. 
I will test this later.\n/uncc\nI am very interested in this PR, but since it is idle, I am unassigning myself. PLEASE add me back when you're ready to proceed.\nhttps://github.com/kubernetes/kubernetes/pull/63641#issuecomment-405003281\r\nhttps://github.com/kubernetes/kubernetes/pull/63641#issuecomment-404924118\r\n\r\nTo be honest, I have the same confusion.\r\n\r\n@lovejoy , Is there any test conclusion to share now? Thanks.\n@warmchang @vishh @mikkeloscar \r\n\r\nI did a test : a pod with tmpfs limit of 200M and memory limit of 100M, and write 150M file to empty_dir like below and the pod didn't oom" + ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "priority-important-soon", + "sig-storage", + "sig-node", + "release-note", + "size-s", + "cncf-cla--yes" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [ + "Pod", + "Namespace" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/kubernetes/kubernetes/pull/63641", + "sourceRepo": "kubernetes/kubernetes", + "reactions": 44, + "comments": 81 + }, + "security": { + "scannedAt": "2026-02-27T17:44:21.281Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-68719-add-statefulset-kubernetes-io-pod-ordinal-label.json b/solutions/cncf-generated/kubernetes/kubernetes-68719-add-statefulset-kubernetes-io-pod-ordinal-label.json new file mode 100644 index 00000000..31e7e8bc --- /dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-68719-add-statefulset-kubernetes-io-pod-ordinal-label.json @@ -0,0 +1,59 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:23.993Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubernetes: Add statefulset.kubernetes.io/pod-ordinal label", + "description": "**What this PR does / why we need it**:\nThis PR 
makes the ordinal number of each pod in a statefulset available with the label `statefulset.kubernetes.io/pod-ordinal`.\n\nThis enables direct access to each pod’s ordinal number as an environment variable via the downward API:\n\n```yaml\nenv:\n- name: POD_ORDINAL\n valueFrom:\n fieldRef:\n fieldPath: metadata.labels['statefulset.kubernetes.io/pod-ordinal']\n```\n\n**Which issue(s) this PR fixes**:\nFixes #30427 and fixes #40651 \n\n**Special notes for your reviewer**:\n\n**Release note**:\n\n```release-note\nStatefulSet controller will create a label, `statefulset.kubernetes.io/pod-ordinal`, for the ordinal number of each Pod in a StatefulSet. This enables access to the ordinal number of each Pod controlled by a StatefulSet via the downward API.\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "> @liggitt do you have an opinion on bypassing alpha for this label? I think I'd say we follow field rules here. We _could_ put the label behind a feature gate for one release, which also accomplishes this goal.\n\nI assume you mean whether to add the label at all, rather than whether to include \".alpha.\" in the label key?\n\nBecause the fields are in metadata, there's no loss of data in skew or downgrade scenarios.\n\nOn upgrade, will the controller add the label to existing pods?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "env:\r\n- name: POD_ORDINAL\r\n valueFrom:\r\n fieldRef:\r\n fieldPath: metadata.labels['statefulset.kubernetes.io/pod-ordinal']", + "/assign @kow3ns\n/kind feature\n/assign @janetkuo\n@dixudx I certainly agree that it is trivial to parse the ordinal number from the pod name. It is a usability concern, however, as outlined by many comments in the linked github issues. 
Directly accessing the ordinal number as an environment variable is less brittle (every user does not need to implement their own logic) and more flexible (no need to override the entrypoint or use pod lifecycle events in order to support \"off the shelf\" images as communicated in comments on linked issues).\n/cc @kubernetes/sig-apps-feature-requests\n@jlegrone: Reiterating the mentions to trigger a notification: \n@kubernetes/sig-apps-feature-requests\n\n\n
\n\nIn response to [this](https://github.com/kubernetes/kubernetes/pull/68719#issuecomment-422421415):\n\n>/cc @kubernetes/sig-apps-feature-requests\n\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n
\n/ok-to-test\r\n\r\nThis makes sense to me, having written my fair share of annoying entrypoint wrapper scripts to parse the Pod name via `$(hostname)` into an ordinal env var. The addition of the `pod-name` label helped a little, but you still need the ugly wrapper script to parse the Pod name if you want the ordinal, and I've seen many people get this wrong (e.g. their scripts get confused if the StatefulSet name itself also contains a match for `-(\\d+)`).\r\n\r\nFor anyone wondering why we couldn't have just added `pod-ordinal` to begin with instead of `pod-name`, it's because `pod-name` addresses a different use case. The purpose of `pod-name` was to have a label selector `pod-name: blah-N` that's guaranteed to select only one Pod in a given namespace. By contrast, a selector built from (StatefulSet's label selector + `pod-ordinal: N`) is not guaranteed to only select one Pod because multiple StatefulSets' label selectors can overlap or even be identical.\r\n\r\nI think it makes sense to have both labels. The ordinal of a StatefulSet Pod meets the criteria to be a label because it's part of the identity of the Pod, and the linked issues show that it's useful.\r\n\r\nDoes anyone have any remaining objections to this?\n@smarterclayton you were involved in the previous discussions on the topic. 
Does the api look good to you?\n/hold\r\n\r\nwhile we resolve the alpha API question @kubernetes/api-reviewers \nCan you articulate the use case for selecting by an ordinal as justification for this not just being an annotation (which is a smaller API change)?\n> Can you articulate the use case for selecting by an ordinal as justification for this not just being an annotation (which is a smaller API change)?\r\n\r\n@smarterclayton I don't think it will ever be a common case, but if a user (or more likely a CRD controller) is orchestrating several statefulsets to work together in some way, for example to represent replicated shards in a database, then it may be desirable to select a cross-section of pods controlled by those statefulsets.\r\n\r\nGiven this topology:\r\n\r\n| sfs `shard-a` | sfs `shard-b` | sfs `shard-c` |\r\n|---------------------------|---------------------------|---------------------------|\r\n| master (pod `shard-a-0`) | master (pod `shard-b-0`) | master (pod `shard-c-0`) |\r\n| replica (pod `shard-a-1`) | replica (pod `shard-b-1`) | replica (pod `shard-c-1`) |\r\n| replica (pod `shard-a-2`) | replica (pod `shard-b-2`) | replica (pod `shard-c-2`) |\r\n\r\nUsers could leverage a label to view the state of only `replica` pods:\r\n\r\n`kubectl get pods -l tier=db,statefulset.kubernetes.io/pod-ordinal!=0`\r\n\r\nOr define a service to send traffic only to `master` pods:" + ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "area-test", + "release-note", + "needs-rebase", + "size-m", + "kind-api-change", + "kind-feature", + "sig-apps", + "cncf-cla--yes", + "sig-architecture", + "do-not-merge-hold" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [ + "Pod", + "Statefulset" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/kubernetes/kubernetes/pull/68719", + "sourceRepo": "kubernetes/kubernetes", + "reactions": 38, + "comments": 61 + }, + 
"security": { + "scannedAt": "2026-02-27T17:44:23.993Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-69263-respect-allocation-ids.json b/solutions/cncf-generated/kubernetes/kubernetes-69263-respect-allocation-ids.json new file mode 100644 index 00000000..3ea3ce03 --- /dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-69263-respect-allocation-ids.json @@ -0,0 +1,53 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:30.016Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubernetes: Respect Allocation IDs", + "description": "**What this PR does / why we need it**:\nAWS NLB supports the use of static IP addresses (EIP) with Network Load Balancers. This PR supports a new annotation on services `service.beta.kubernetes.io/aws-load-balancer-eip-allocations` which is a comma separated list of AWS Allocation IDs. The number of Allocation IDs must match the number of subnets used for the load balancer. \n\n/kind feature\n/sig aws\n\nFixes #63959 \n\n**Special notes for your reviewer**:\nNone \n\n**Release note**:\n```release-note\nCreates an annotation 'service.beta.kubernetes.io/aws-load-balancer-eip-allocations' to assign AWS EIP to the newly created Network Load Balancer. Number of allocations and subnets must match.\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@brooksgarrett Thanks for your first contribution to Kubernetes! 
:tada: \n\n/ok-to-test\n\nThis change it looks like a release note?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "lgtm", + "area-cloudprovider", + "release-note", + "size-m", + "kind-feature", + "approved", + "cncf-cla--yes", + "sig-cloud-provider", + "ok-to-test" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/kubernetes/kubernetes/pull/69263", + "sourceRepo": "kubernetes/kubernetes", + "reactions": 28, + "comments": 39 + }, + "security": { + "scannedAt": "2026-02-27T17:44:30.016Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-69867-allow-updates-patches-to-pod-disruption-budgets.json b/solutions/cncf-generated/kubernetes/kubernetes-69867-allow-updates-patches-to-pod-disruption-budgets.json new file mode 100644 index 00000000..75565739 --- /dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-69867-allow-updates-patches-to-pod-disruption-budgets.json @@ -0,0 +1,60 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:18.420Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubernetes: Allow updates/patches to pod disruption budgets", + "description": "**What this PR does / why we need it**: It removes the immutability of pod disruption budgets. Encouraging teams to use Pod Disruption Budgets is a vital part of us allowing us to effectively perform maintenance on our clusters without inadvertently taking out one of the services running within it. 
With the present Pod Disruption Budgets being immutable this presents problems incorporating them into application charts (neither kubectl with force or helm can properly handle changing a pdb object - forcing an administrator to delete and re-create it). It has been said in the issue that there is no longer any design reason requiring pdbs to be immutable, so let's remove this control and allow them to be patched.\n\n**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\nFixes #45398\nhttps://github.com/kubernetes/kubernetes/issues/45398\n**Special notes for your reviewer**:\nI have removed ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/kind feature\n/sig api-machinery\n/ok-to-test", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "/kind feature\r\n/sig api-machinery\r\n/ok-to-test\r\n\n/sig apps\r\n/remove-sig api-machinery\r\n\n@davidmccormick \r\nplease add a release note explaining the change.\r\n\nok have added a release note - does it look ok?\n@davidmccormick \r\nnormally RN are kept down to a sentence or two.\r\nthe title seems like a good one.\r\n> Allow updates/patches to pod disruption budgets\r\n\r\nthe tool that collects release notes also inserts a PR link next to them for better context.\r\n\r\ni would defer to the maintainers for further comments on the PR itself.\r\nthanks.\r\n\nok thanks, have updated the release note as suggested\n@hongchaodeng @janetkuo can you review this PR please?\n@mattfarina @janetkuo \r\nhey folks,\r\nany chance to have a look at it?\n@kubernetes/sig-apps-feature-requests @kubernetes/sig-apps-pr-reviews can some review or comment. This seems like we should do ?\n@davidmccormick Sorry for the delay. Are you able to pick this back up? 
I personally want to see this happen too and I'll volunteer to be a reviewer and help track down an approver. If you don't have time to continue this, please let us know so we can open a new PR.\n/assign\nThanks for your pull request. Before we can look at your pull request, you'll need to sign a Contributor License Agreement (CLA).\n\n:memo: **Please follow instructions at to sign the CLA.**\n\nIt may take a couple minutes for the CLA signature to be fully registered; after that, please reply here with a new comment and we'll verify. Thanks.\n\n---\n\n- If you've already signed a CLA, it's possible we don't have your GitHub username or you're using a different email address. Check your existing CLA data and verify that your [email is set on your git commits](https://help.github.com/articles/setting-your-email-in-git/).\n- If you signed the CLA as a corporation, please sign in with your organization's credentials at to be authorized.\n- If you have done the above and are still having issues with the CLA being reported as unsigned, please email the CNCF helpdesk: helpdesk@rt.linuxfoundation.org\n\n\n\n
\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository. I understand the commands that are listed [here](https://go.k8s.io/bot-commands).\n
\n\t\nHi @enisoc, thanks so much for picking this up!! I am happy to work on this, I don't have any experience of the integration or e2e test suites but I'm happy to learn if you don't mind providing some guidance? A colleague of mine has some experience here and so he's giving me a bit of intro to them.\nFor e2e, I recommend starting by making a copy of this bare-bones test case and place it just below:\r\n\r\nhttps://sourcegraph.com/github.com/kubernetes/kubernetes@7693a1d5fe2a35b6e2e205f03ae9b3eddcdabc6b/-/blob/test/e2e/apps/disruption.go#L56-58\r\n\r\nThen look at the body of the table-based test below, and start pulling out some additional steps you'll need:\r\n\r\nhttps://sourcegraph.com/github.com/kubernetes/kubernetes@7693a1d5fe2a35b6e2e205f03ae9b3eddcdabc6b/-/blob/test/e2e/apps/disruption.go#L148-216\r\n\r\nThe test I'm envisioning would start out similarly:\r\n\r\n1. Create Pods and RS.\r\n2. Create PDB.\r\n3. Find a running Pod.\r\n4. Try to evict that Pod.\r\n5. Check that the eviction is denied.\r\n\r\nBut then you would add a second phase:\r\n\r\n6. Update the PDB in-place to change minAvailable so the eviction will be allowed.\r\n7. Try the eviction again on the same Pod. You might need to retry with a timeout, since the change will take effect asynchronously.\r\n8. Check that the eviction eventually succeeds within a reasonable time (30s?).\r\n\r\nThe easiest way to run e2e is to push to your PR branch, but the round-trip time is very long. You can find instructions for running e2e locally here:\r\n\r\nhttps://github.com/kubernetes/community/blob/master/contributors/devel/sig-testing/e2e-tests.md\r\n\r\nOnce we're on track for e2e we can talk about integration tests. Let me know if you need more pointers. Good luck and thanks for helping to fix this for all users!\n@enisoc Hi, apologies on the delay in getting this work done! 
I have now got a bit more up-to-speed with the e2e test suit and have written an e2e test that covers the scenario you suggested. Does it look ok to you?\r\n\r\nYou should be able to run the test on its own: -", + "/retest\n> There can be races between eviction requests and PDB mutations, but I would argue that allowing mutations does not increase the risk any more than we already accept just because PDBs can be deleted and recreated.\r\n\r\nI agree. Very glad to see movement on this.\nHmm by taking out the time.Sleep I now occasionally get a race error when running my test stating that my PDB is not ready when I try an eviction...", + "> Hmm by taking out the time.Sleep I now occasionally get a race error when running my test stating that my PDB is not ready when I try an eviction...\r\n> \r\n>" + ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "area-test", + "area-apiserver", + "lgtm", + "release-note", + "size-l", + "kind-feature", + "sig-apps", + "approved", + "cncf-cla--yes", + "sig-testing" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [ + "Pod", + "Service" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/kubernetes/kubernetes/pull/69867", + "sourceRepo": "kubernetes/kubernetes", + "reactions": 48, + "comments": 40 + }, + "security": { + "scannedAt": "2026-02-27T17:44:18.420Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-74526-support-scaling-hpa-to-from-zero-pods-for-object-external-metri.json b/solutions/cncf-generated/kubernetes/kubernetes-74526-support-scaling-hpa-to-from-zero-pods-for-object-external-metri.json new file mode 100644 index 00000000..76e8ad87 --- /dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-74526-support-scaling-hpa-to-from-zero-pods-for-object-external-metri.json @@ -0,0 +1,56 @@ +{ + "format": 
"kc-mission-v1", + "exportedAt": "2026-02-27T17:44:31.491Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubernetes: Support scaling HPA to/from zero pods for object/external metrics", + "description": "**What type of PR is this?**\n\n/kind feature\n\n**What this PR does / why we need it**:\nThis PR targets worker deployments that use queues and scaling is based on object or external metric that depends on queue size. When workers are idle it is possible to scale corresponding deployment to zero replicas and save resources.\n\nThis technique is especially useful when workers request GPU resources and the amount of different idling worker types exceeds number of available GPUs.\n\n**Which issue(s) this PR fixes**:\n\nFixes #69687\n\n**Special notes for your reviewer**:\nThe PR is based on changes made in https://github.com/kubernetes/kubernetes/pull/61423\n\n1. Scale to/from zero changes are made\n2. Applied changes from https://github.com/kubernetes/kubernetes/pull/61423\n3. HPA continues to scale as long as there is at least one metric value available. 
\nThere is no conservative scale down behaviour introduced in https://github.com/kubernetes/kubernetes/pull/61423\nScaling down works even if we have jus", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "API LGTM\n\n@mwielgus ping me when LGTM and I will approve", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "lgtm", + "sig-api-machinery", + "release-note", + "sig-autoscaling", + "size-xl", + "kind-api-change", + "kind-feature", + "sig-apps", + "approved", + "cncf-cla--yes" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [ + "Pod", + "Deployment", + "Namespace" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubernetes/kubernetes/pull/74526", + "sourceRepo": "kubernetes/kubernetes", + "reactions": 25, + "comments": 74 + }, + "security": { + "scannedAt": "2026-02-27T17:44:31.491Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-74840-kube-proxy-drop-packets-in-invalid-state.json b/solutions/cncf-generated/kubernetes/kubernetes-74840-kube-proxy-drop-packets-in-invalid-state.json new file mode 100644 index 00000000..4fa3bea3 --- /dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-74840-kube-proxy-drop-packets-in-invalid-state.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:37.133Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubernetes: kube-proxy: Drop packets in INVALID state", + "description": "Fixes: #74839\n\n**What type of PR is this?**\n/kind bug\n\n**What this PR does / why we need it**:\n\n**Which issue(s) this PR fixes**:\n\nFixes #74839\n\n**Special notes for your reviewer**:\n\n**Does 
this PR introduce a user-facing change?**:\n\n```release-note\nPackets considered INVALID by conntrack are now dropped. In particular, this fixes\na problem where spurious retransmits in a long-running TCP connection to a service\nIP could result in the connection being closed with the error \"Connection reset by\npeer\"\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/assign @thockin \n\nIs this testable is some way?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "kind-bug", + "sig-network", + "lgtm", + "release-note", + "size-s", + "approved", + "cncf-cla--yes", + "needs-priority" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/kubernetes/kubernetes/pull/74840", + "sourceRepo": "kubernetes/kubernetes", + "reactions": 21, + "comments": 42 + }, + "security": { + "scannedAt": "2026-02-27T17:44:37.133Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-74910-add-tls-support-for-nlb-fix-several-nlb-bugs.json b/solutions/cncf-generated/kubernetes/kubernetes-74910-add-tls-support-for-nlb-fix-several-nlb-bugs.json new file mode 100644 index 00000000..92f63c2d --- /dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-74910-add-tls-support-for-nlb-fix-several-nlb-bugs.json @@ -0,0 +1,57 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:28.844Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubernetes: add TLS support for NLB / fix several NLB bugs", + "description": "**What type of PR is this?**\n/kind feature\n\n**What this PR does / why we need it**:\nAdd [TLS 
support](https://aws.amazon.com/blogs/aws/new-tls-termination-for-network-load-balancers/) for NLB\nFix several NLB bugs(around targetGroup naming/tagging)\n\n**Which issue(s) this PR fixes**:\n\nFixes #73297\nFixes #69264\nFixes #75006\n\n**Special notes for your reviewer**:\n1. new targetGroups will get name k8s-{namespace:8}-{name:8}-{uuid:10}. \n2. TLS is an upgrade version of SSL protocol, in CLB, both SSL/TLS is identified as `SSL`, however, in ALB/NLB, both SSL/TLS is identified as `TLS`. To avoid confusing and ease migration from CLB to NLB, `service.beta.kubernetes.io/aws-load-balancer-backend-protocol:ssl` is re-used for denoting backend SSL in NLB as well.\n3. Test done:\n * migration from TCP to TLS termination:\n 1. create NLB service with TCP port(443), which forward to backend HTTPS port(443). \n 1. access the NLB, observed TLS termination at backend works fine(cert by backend)", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/assign @micahhausler", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "/assign @micahhausler \n/test pull-kubernetes-e2e-gce-100-performance\nDo we know when this bug will be fixed? We are interested in installing Istio on our AWS EKS with multiple NLBs --> Fixes #69264\nAny updates? I'm also interested in this feature.\nAny update or timeline for this getting merged?\n@gnufied @jsafrane ?\n/bump\n[APPROVALNOTIFIER] This PR is **APPROVED**\n\nThis pull-request has been approved by: *M00nF1sh*, *micahhausler*\n\nThe full list of commands accepted by this bot can be found [here](https://go.k8s.io/bot-commands?repo=kubernetes%2Fkubernetes).\n\nThe pull request process is described [here](https://git.k8s.io/community/contributors/guide/owners.md#the-code-review-process)\n\n
\nNeeds approval from an approver in each of these files:\n\n- ~~[pkg/cloudprovider/providers/aws/OWNERS](https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/aws/OWNERS)~~ [micahhausler]\n\nApprovers can indicate their approval by writing `/approve` in a comment\nApprovers can cancel approval by writing `/approve cancel` in a comment\n
\n\n/test pull-kubernetes-kubemark-e2e-gce-big\nWhat are the chances this'll make its way into a 1.11.x release?\nDragging in on the mail chain.\n> What are the chances this'll make its way into a 1.11.x release?\r\n\r\nHi, I'll check whether there are merge conflicts and cherry pick this back 😄 \nSorry, cherrypicks are only for bugfixes not features\nTo me, this seems like a bit of both. So which version will have these changes then?\n+1 What version is this going into?\n@tnachen Hi, this will go into v1.15 😄 \nThanks for the response, @M00nF1sh. I guess it's time to put the heat on the kops team to catch up then :)\nThanks @M00nF1sh for working on this.\nHi @M00nF1sh and @micahhausler!\r\nDo you know why this has not been released?\n> Hi @M00nF1sh and @micahhausler!\r\n> Do you know why this has not been released?\r\n\r\n@igorvpcleao Hi, this is already available in k8s v1.15.\nCan someone confirm this has been released in 1.15 please ?\r\nBecause I am having the #69264 issue (`DuplicateTargetGroupName`) when trying to configure Istio NLB for 2 clusters in the same zone with the following versions :" + ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "lgtm", + "area-cloudprovider", + "release-note", + "size-l", + "kind-feature", + "approved", + "cncf-cla--yes", + "sig-cloud-provider", + "needs-priority" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [ + "Service", + "Namespace" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/kubernetes/kubernetes/pull/74910", + "sourceRepo": "kubernetes/kubernetes", + "reactions": 32, + "comments": 25 + }, + "security": { + "scannedAt": "2026-02-27T17:44:28.844Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-75831-support-mixed-protocol-lbs.json 
b/solutions/cncf-generated/kubernetes/kubernetes-75831-support-mixed-protocol-lbs.json new file mode 100644 index 00000000..133d306d --- /dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-75831-support-mixed-protocol-lbs.json @@ -0,0 +1,58 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:22.904Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubernetes: Support mixed protocol LBs", + "description": "**What type of PR is this?**\n\n/kind feature\n\n**What this PR does / why we need it**:\n\nThis PR adds support for configuring a service of type LoadBalancer with more than 1 protocol. For example, this allows configuring a LoadBalancer with both TCP and UDP port 53 (for a DNS server), or TCP and UDP 443 for HTTPS + QUIC (HTTP 3.0).\n\n**Which issue(s) this PR fixes**:\n\nFixes #23880\n\n**Special notes for your reviewer**:\n\nMixed protocol LBs are supported by Azure and MetalLB. Other providers MAY support this, however, I'm unable to verify the implementations and have opted to reject mixed protocol LBs on these CPIs where necessary (this turned out to only need rejection in the GCP provider)\n\nAn example service to enable and use this feature:\n\n apiVersion: v1\n kind: Service\n metadata:\n name: mixed-protocol\n spec:\n type: LoadBalancer\n ports:\n - name: dns-udp\n port: 53\n protocol: UDP\n - name: dns-tcp\n port: 53\n protocol", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "**What type of PR is this?**\n/kind bug\n\n**What this PR does / why we need it**:\nfix mixed protocol issue for azure load balancer, with below config (`service.beta.kubernetes.io/azure-load-balancer-mixed-protocols: \"true\"`), azure provider will create both TCP and UDP rules for the service.\n```\napiVersion: v1\nkind: Service\nmetadata:\n annotations:\n service.beta.kubernetes.io/azure-load-balancer-mixed-protocols: \"true\"\n name: web\n namespace: 
default\nspec:\n ports:\n - port: 80\n protocol: TCP\n targetPort: 80\n selector:\n app: web\n sessionAffinity: None\n type: LoadBalancer\n```\n\nWith this PR, you could see below both TCP and UDP rules are created for the service:\n![image](https://user-images.githubusercontent.com/4178417/52937705-da2eef80-339a-11e9-8d4f-9578c8cb1d0f.png)\n\n**Which issue(s) this PR fixes**:\n\nFixes #73849\n\n**Special notes for your reviewer**:\nOriginal PR(https://github.com/kubernetes/kubernetes/pull/67986) is not completed, I have no idea why I submitted a non-completed PR at that time...\n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\nfix mixed protocol issue for azure load balancer\n```\n\n/kind bug\n/assign @feiskyer \n/priority important-soon\n/sig azure", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "\r\n\r\n**What type of PR is this?**\r\n/kind bug\r\n\r\n**What this PR does / why we need it**:\r\nfix mixed protocol issue for azure load balancer, with below config (`service.beta.kubernetes.io/azure-load-balancer-mixed-protocols: \"true\"`), azure provider will create both TCP and UDP rules for the service.", + "With this PR, you could see below both TCP and UDP rules are created for the service:\r\n![image](https://user-images.githubusercontent.com/4178417/52937705-da2eef80-339a-11e9-8d4f-9578c8cb1d0f.png)\r\n\r\n**Which issue(s) this PR fixes**:\r\n\r\nFixes #73849\r\n\r\n**Special notes for your reviewer**:\r\nOriginal PR(https://github.com/kubernetes/kubernetes/pull/67986) is not completed, I have no idea why I submitted a non-completed PR at that time...\r\n\r\n**Does this PR introduce a user-facing change?**:\r\n" + ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "area-cloudprovider", + "release-note", + "needs-rebase", + "size-l", + "kind-feature", + "sig-apps", + "cncf-cla--yes", + "sig-cloud-provider", + 
"ok-to-test", + "needs-priority" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubernetes/kubernetes/pull/75831", + "sourceRepo": "kubernetes/kubernetes", + "reactions": 43, + "comments": 33 + }, + "security": { + "scannedAt": "2026-02-27T17:44:22.904Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-81557-fix-cronjob-missed-start-time-handling.json b/solutions/cncf-generated/kubernetes/kubernetes-81557-fix-cronjob-missed-start-time-handling.json new file mode 100644 index 00000000..c0552e53 --- /dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-81557-fix-cronjob-missed-start-time-handling.json @@ -0,0 +1,57 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:27.775Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubernetes: Fix CronJob missed start time handling", + "description": "**What type of PR is this?**\n\n/kind bug\n/sig apps\n/area workload-api/cronjob\n/priority important-soon\n\n**What this PR does / why we need it**:\n\nThis removes the arbitrary limit of 100 missed start times and will instead always schedule a job when an execution was missed and the `Spec.StartingDeadlineSeconds` period has not yet passed (if set).\n\nTo prevent starting multiple jobs if multiple start times were missed, `Status.LastScheduleTime` is now set to the actual start time of the job instead of the original scheduled start time.\n\nThe \"missed starting window\" warning message is removed because it was broken (see kubernetes/kubernetes#73169).\n\n**Which issue(s) this PR fixes**:\n\nFixes #42649\nFixes #73169\n\n**Special notes for your reviewer**:\n\nSee #42649 for numerous examples of problems caused by the current arbitrary limit of 100 missed 
start times, which is easily hit in real-world usage. For example, with a cron job set to execute every minute, 101 minutes of controller downtime (due", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/ok-to-test", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "**Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.**:" + ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "kind-bug", + "priority-important-soon", + "release-note", + "needs-rebase", + "size-l", + "sig-apps", + "cncf-cla--yes", + "area-workload-api-cronjob", + "ok-to-test" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [ + "Job", + "Cronjob" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/kubernetes/kubernetes/pull/81557", + "sourceRepo": "kubernetes/kubernetes", + "reactions": 32, + "comments": 35 + }, + "security": { + "scannedAt": "2026-02-27T17:44:27.775Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-95301-automatically-remove-orphaned-pod-s-dangling-volumes.json b/solutions/cncf-generated/kubernetes/kubernetes-95301-automatically-remove-orphaned-pod-s-dangling-volumes.json new file mode 100644 index 00000000..56aff489 --- /dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-95301-automatically-remove-orphaned-pod-s-dangling-volumes.json @@ -0,0 +1,58 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:33.415Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubernetes: Automatically remove orphaned pod's dangling volumes", + "description": "**What type of PR is this?**\n\n/kind bug\n\n**What this PR does / why we need it**:\nThis makes Kubelet 
automatically clean up dangling volumes for orphaned pods and also emits warnings when it does so. This fixes the endless log spam about orphaned volumes (`Orphaned pod found, but volume paths are still present on disk`) and replaces it with a single useful message (`Removed dangling volume directories for orphaned pod ` or `Removed dangling subpath directories for orphaned pod `). This still allows for debugging issues with volume reconstruction without annoying operators excessively. People are currently using highly unsafe scripts to work around this issue (like https://github.com/kubernetes/kubernetes/issues/60987#issuecomment-673915505).\n\nAs of Kubernetes 1.19 volume reconstruction still sometimes fails just with emptyDirs and secrets and no kubelet restarts.\n\nThis is a rewrite of #73799 which also deals with subpaths and uses more K8s utility code.\n\n**Which issue(s)", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/ok-to-test", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Hi @lorenz. Thanks for your PR.\n\nI'm waiting for a [kubernetes](https://github.com/orgs/kubernetes/people) member to verify that this patch is reasonable to test. If it is, they should reply with `/ok-to-test` on its own line. Until that is done, I will not automatically test new commits in this PR, but the usual testing commands by org members will still work. Regular contributors should [join the org](https://git.k8s.io/community/community-membership.md#member) to skip this step.\n\nOnce the patch is verified, the new status will be reflected by the `ok-to-test` label.\n\nI understand the commands that are listed [here](https://go.k8s.io/bot-commands?repo=kubernetes%2Fkubernetes).\n\n
\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n
\n\n/sig storage\n/ok-to-test\n/assign @msau42 @sjenning @derekwaynecarr \n/assign @jingxu97 \nI rebased onto current master, but all K8s tests somehow fail with a merge conflict.\n@kubernetes/sig-storage-pr-reviews \r\n\r\nUnmounting things out of band from volume manager is potentially unsafe, and not all volume types are mount points. I would prefer we spend time investigating the root cause of why these orphaned volumes are not getting cleaned up properly\n> I would prefer we spend time investigating the root cause of why these orphaned volumes are not getting cleaned up properly\r\n\r\n@msau42 that's what's been happening in the original https://github.com/kubernetes/kubernetes/issues/60987 report for the last 2.5 years, with little to no actually attention from the core development team :shrug: \nAdding more folks from the original pr\r\n/assign @gnufied @jsafrane \r\n\r\nI actually preferred the original pr #73799 that only removed empty directories. The challenge with unmounting here is that it can hang kubelet if the unmount blocks, and also now that we have csi, it's possible that volumes are not actually mounts and cleanup could be more involved than just an unmount.\r\n\r\n@zerkms I understand your frustration but there are so many different reasons why you can end up with orphaned volumes. It could be a bug in the plugin or a bug in kubelet. @gnufied has fixed a number of cleanup issues in csi over the last few releases. 
It would be really helpful if we could get kubelet logs at the time the pod was being deleted and unmounting was supposed to happen so that we can help figure out how we got into this state.\r\n\r\nI think from @lorenz's original pr, the issue looked related to reconstruction of flexvolumes, so maybe we should be investigating that at least for that particular issue.\r\n\r\n\nI have reproduced this on a fresh cluster with a fresh OS and only emptyDir based volumes (secrets, configmap, projected and emptyDir) without even restarting Kubelet in 1.19. I've also repoduced it with CSI and FlexVols, it's not just a single volume plugin that's broken, but it can happen to almost any of them.\r\n\r\nThis change is basically orthagonal to improving volume reconstruction and deals with cases where that fails as best as it can instead of spamming the operators (which can generally not fix broken reconstruction for internal volume plugins). People who actually want to improve volume reconstruction now actually get better logs since this PR makes Kubelet log exactly one warning at the exact time when the orphaned volumes were detected and cleaned up and also explicitly spells out the pod name whereas with the old system logged an unlimited amount of messages but only logged the first pod where volumes got orphaned unless you ran with an extremely high verbosity.\r\n\r\nThe only issue I see with this PR is that unmounting might hang. If you have volumes that aren't mounts (which is unusual) this solution is not worse than the status quo, it will just fail to remove the mount dir since there's content in there.\r\nI'm fine with dropping the unmounting, but the way I'm doing it now imposes the least maintenance burden since it reuses most code for accessing volumes and subpaths.\nSince you can easily repro the issue with emptydir, can you provide repro steps and kubelet logs? 
It sounds like the problem is more widespread so it would be really helpful to get to the bottom of it.\nSadly the reproduction on the fully-clean setup depends on not-yet-released software and is highly probabilistic (happens in stress tests of that system at a rate of a bit under 0.1% of pods). I do have logs of these however.\r\n\r\nThe FlexVol repro is much easier, just abort a running system (pull the power, NMI it, let the Kernel crash) and restart Kubelet.\nLogs from the fully-clean setup:" + ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "kind-bug", + "priority-important-soon", + "area-kubelet", + "lgtm", + "sig-storage", + "sig-node", + "release-note", + "size-l", + "approved", + "cncf-cla--yes" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [ + "Pod", + "Secret" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/kubernetes/kubernetes/pull/95301", + "sourceRepo": "kubernetes/kubernetes", + "reactions": 24, + "comments": 49 + }, + "security": { + "scannedAt": "2026-02-27T17:44:33.415Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-95981-enables-http-2-health-check.json b/solutions/cncf-generated/kubernetes/kubernetes-95981-enables-http-2-health-check.json new file mode 100644 index 00000000..320b4bf2 --- /dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-95981-enables-http-2-health-check.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:26.511Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubernetes: Enables HTTP/2 health check", + "description": "This PR enables the HTTP/2 connection health check for all the clients that call `SetTransportDefaults`. This includes all clients created via k8s.io/client-go. 
Without the health check, a broken transport layer connection can stay in the HTTP/2 connection pool for a long time, causing requests to the same host to fail. For example, a broken TCP connection can linger for [15 minutes ](https://pracucci.com/linux-tcp-rto-min-max-and-tcp-retries2.html)before being closed.\n\nThe HTTP/2 feature exposes two parameters,\n* `ReadIdleTimeout`: if the HTTP/2 client has not received any frame from the connection for this amount of time, it starts sending periodic pings to the server. The period of the pings is also `ReadIdleTimeout`.\n* `PingTimeout`: if an ACK to the ping is not received by the client after this amount of time, the connection is considered broken and will be closed by the client.\n\nFor Kubernetes, I default the `ReadIdleTimeout` to 30s, which I think is not going to cause performanc", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Taint lasts for the tcp keepalive period.\nTaint code only works if client is remembering to properly close its\nconnection.\nTested using ifconfig down to break the connection and prevent tcp close\nfrom the OS. 
(kill -9 and similar do not work).\nThis does NOT close tunnels but instead relies on the already\nimplemented TCP keepalive for that functionality.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "kind-bug", + "priority-important-soon", + "area-apiserver", + "area-kubectl", + "lgtm", + "area-cloudprovider", + "sig-node", + "sig-api-machinery", + "sig-cluster-lifecycle", + "release-note" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/kubernetes/kubernetes/pull/95981", + "sourceRepo": "kubernetes/kubernetes", + "reactions": 35, + "comments": 44 + }, + "security": { + "scannedAt": "2026-02-27T17:44:26.512Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-96087-move-all-bash-custom-completions-to-go.json b/solutions/cncf-generated/kubernetes/kubernetes-96087-move-all-bash-custom-completions-to-go.json new file mode 100644 index 00000000..44ee02db --- /dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-96087-move-all-bash-custom-completions-to-go.json @@ -0,0 +1,66 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:36.080Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubernetes: Move all bash custom completions to Go", + "description": "**What type of PR is this?**\n\n/kind feature\n\n**What this PR does / why we need it**:\n\nBased on #93714, this PR finishes removing all bash completion scripting and replaces it fully with Go completions.\n\n*Advantages:*\n* easier maintenance of custom completions\n* ability to write Go tests for custom completions\n* allow to eventually move to native zsh completion\n* allow 
Fish shell completion PR (#92989) to fully support all of kubectl's custom completions\n* removes lack of portability of bash scripts\n* will allow to improve existing custom completions and add other ones\n\n**Which issue(s) this PR fixes**:\n\nFixes https://github.com/kubernetes/kubectl/issues/882\n\n**Special notes for your reviewer**:\n\nThe PR is based on top of @knight42 great work of #93714. However, it has been mostly refactored when included with the rest of the PR.\n\nThe pattern that the PR follows is:\n- main completion functions are added in their matching files:\n - completion of api resource names: `apiresources.go#C", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "**What type of PR is this?**\n\n/kind feature\n\n**What this PR does / why we need it**:\n\nImplement custom completions using Go for flag `--context`, `--cluster`, `--user` and `--namespace`.\n\n**Which issue(s) this PR fixes**:\n\nPart of https://github.com/kubernetes/kubectl/issues/882\n\n**Special notes for your reviewer**:\n\n### Test the new completion code\n\n1. build kubectl from source\n```sh\nmake WHAT=cmd/kubectl\n```\n\n2. generate completion code\n```sh\n# zsh\nsource <(./_output/bin/kubectl completion zsh)\n\n# bash\nsource <(./_output/bin/kubectl completion bash)\n```\n\n3. 
play with it\n\nPlease note that the new completion code requires the new kubectl.\n\n```\n./_output/bin/kubectl get --context [TAB]\n\n# debug\n./_output/bin/kubectl __complete get --context ''\n```\n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\nNONE\n```\n\n**Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.**:\n\n```docs\n\n```", + "steps": [ + "build kubectl from source", + "generate completion code", + "play with it" + ], + "codeSnippets": [ + "make WHAT=cmd/kubectl", + "# zsh\r\nsource <(./_output/bin/kubectl completion zsh)\r\n\r\n# bash\r\nsource <(./_output/bin/kubectl completion bash)", + "./_output/bin/kubectl get [TAB]\r\n# debug\r\n./_output/bin/kubectl __complete get \"\"\r\n\r\n./_output/bin/kubectl describe -n kube-system pod [TAB]" + ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "area-kubectl", + "lgtm", + "release-note", + "size-xl", + "kind-feature", + "approved", + "sig-cli", + "cncf-cla--yes", + "ok-to-test", + "needs-priority" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [ + "Pod", + "Deployment", + "Namespace", + "Clusterrole", + "Clusterrolebinding", + "Role", + "Rolebinding" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubernetes/kubernetes/pull/96087", + "sourceRepo": "kubernetes/kubernetes", + "reactions": 21, + "comments": 34 + }, + "security": { + "scannedAt": "2026-02-27T17:44:36.081Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-97743-support-m1-macbooks-darwin-arm64-on-the-client-side.json b/solutions/cncf-generated/kubernetes/kubernetes-97743-support-m1-macbooks-darwin-arm64-on-the-client-side.json new file mode 100644 index 00000000..c50403cb --- /dev/null +++ 
b/solutions/cncf-generated/kubernetes/kubernetes-97743-support-m1-macbooks-darwin-arm64-on-the-client-side.json @@ -0,0 +1,57 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:17.346Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubernetes: Support M1 MacBooks darwin/arm64 on the client-side", + "description": "Please test using:\n```\nbuild/run.sh make generated_files && make quick-release-images\n```\n\nSigned-off-by: Davanum Srinivas \n\n**What type of PR is this?**\n\n/kind feature\n\n**What this PR does / why we need it**:\n\n**Which issue(s) this PR fixes**:\n\nFixes #97550\n\n**Special notes for your reviewer**:\n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\nAdd support to generate client-side binaries for new darwin/arm64 platform\n```\n\n**Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.**:\n\n```docs\n\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "- Code change: this pr or as [inductor comments](https://gist.github.com/inductor/d944f90cb277d077fb8b737ff74b9cf2)\n- [GO 1.16beta +](https://github.com/golang/go/compare/go1.16beta1...master) build go locally\n- etcd doesn't support arm ? 
https://github.com/etcd-io/etcd/pull/12557/files\n\n/kind feature\n\n**What this PR does / why we need it**:\n\n**Which issue(s) this PR fixes**:\n\nFixes #97550\n\n**Special notes for your reviewer**:\n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\n\n```\n\n**Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.**:\n```docs\n\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "build/run.sh make generated_files && make quick-release-images", + "**Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.**:\r\n\r\n", + "- Code change: this pr or as [inductor comments](https://gist.github.com/inductor/d944f90cb277d077fb8b737ff74b9cf2)\r\n- [GO 1.16beta +](https://github.com/golang/go/compare/go1.16beta1...master) build go locally\r\n- etcd doesn't support arm ? https://github.com/etcd-io/etcd/pull/12557/files\r\n\r\n\r\n/kind feature\r\n\r\n**What this PR does / why we need it**:\r\n\r\n**Which issue(s) this PR fixes**:\r\n\r\nFixes #97550\r\n\r\n**Special notes for your reviewer**:\r\n\r\n**Does this PR introduce a user-facing change?**:" + ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "area-test", + "priority-important-soon", + "lgtm", + "release-note", + "size-m", + "kind-feature", + "area-release-eng", + "approved", + "cncf-cla--yes", + "sig-testing" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/kubernetes/kubernetes/pull/97743", + "sourceRepo": "kubernetes/kubernetes", + "reactions": 49, + "comments": 57 + }, + "security": { + "scannedAt": "2026-02-27T17:44:17.346Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git 
a/solutions/cncf-generated/kubernetes/kubernetes-98946-upgrade-kustomize-in-kubectl-to-v4-0-5.json b/solutions/cncf-generated/kubernetes/kubernetes-98946-upgrade-kustomize-in-kubectl-to-v4-0-5.json new file mode 100644 index 00000000..dc223ab1 --- /dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-98946-upgrade-kustomize-in-kubectl-to-v4-0-5.json @@ -0,0 +1,57 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:20.140Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubernetes: Upgrade kustomize-in-kubectl to v4.0.5", + "description": "**What type of PR is this?**\n\n/kind bug\n/kind cleanup\n/sig cli\n/priority important-soon\n\n**What this PR does / why we need it**:\n\nFixes kubernetes-sigs/kustomize#1500\nFixes kubernetes/kubectl#818\n\n**Special notes for your reviewer**:\n\nThis PR has these commits:\n\n> 1. Deletion of `staging/src/k8s.io/cli-runtime/pkg/kustomize/k8sdeps`.\n> 1. Manual edits to these five files under `staging/src/k8s.io/`:\n> - `cli-runtime/pkg/resource/builder.go`\n> - `cli-runtime/pkg/resource/kustomizevisitor.go`\n> - `cli-runtime/pkg/resource/kustomizevisitor_test.go`\n> - `cli-runtime/pkg/resource/visitor.go`\n> - `kubectl/pkg/cmd/kustomize/kustomize.go`\n> 1. Runs of `update-vendor`, `lint-dependencies`, `pin-dependency`, `update-internal-modules` (till linter succeeded).\n\nOnly the second commit has manual code edits. 
The change in `kustomizevisitor_test.go` is due to a bug fix (kubernetes-sigs/kustomize#1899).\n\nDeletion of the old code results in many dependency upgrades, and associate", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "**What type of PR is this?**\n\n/kind cleanup\n\n**What this PR does / why we need it**: Update go-openapi to a version with https://github.com/go-openapi/spec/issues/138 fixed, so init() times of any importer of Kubernetes libraries is not slow\n\n**Which issue(s) this PR fixes**:\n\n**Special notes for your reviewer**:\n\n**Does this PR introduce a user-facing change?**:\n-->\n```release-note\nNONE\n```\n\n**Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.**:\n\n```docs\n\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "\r\n\r\n**What type of PR is this?**\r\n\r\n/kind cleanup\r\n\r\n**What this PR does / why we need it**: Update go-openapi to a version with https://github.com/go-openapi/spec/issues/138 fixed, so init() times of any importer of Kubernetes libraries is not slow\r\n\r\n**Which issue(s) this PR fixes**:\r\n\r\n**Special notes for your reviewer**:\r\n\r\n**Does this PR introduce a user-facing change?**:\r\n-->", + "**Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.**:\r\n\r\n", + "/remove-sig instrumentation\r\n/remove-sig node\r\n/assign @dims\r\n\n/remove-sig api-machinery\r\n/remove-sig cluster-lifecycle\r\n/remove-sig network\r\n/remove-sig cloud-provider\r\n\nThanks @ehashman , love the sony mobile unit.\r\nThanks @dims, open to any suggestions for getting those tasks to pass.\r\n\n@soltysh , @seans3 , @eddiezane FYI - was going to mention in today's sig-cli meeting (kustomize standup), but meeting was cancelled\r\n\nLooks like `github.com/yujunz/go-getter` is a HUGE magnet for new dependencies ( cc @liggitt 
)\r\n\r\n![image](https://user-images.githubusercontent.com/23304/107549936-68ab6580-6b9e-11eb-82e2-f584aad1396c.png)\r\n\n@dims, Yes, I'm already working on removing it - what it does isn't vital.\r\nI'll need a couple of days, because I have to do that then ship new kustomize libs,\r\nthen update the deps in this PR.\r\n\r\n\r\n\nExcellent news thanks @monopole !\nWell, 'isn't vital' is in the eyes of the user.\r\n\r\nThe code in question allows one to fetch KRM from local files, github, gitlab, mercurial, http, S3, GCP, DNA, whale song, etc. - hence the large number of deps. Some people like this (not sure how many). \r\n\r\nOne can isolate this in a plugin (help wanted btw), so there's a path forward after falling back to a minimal fetch from local files and whatever arguments `git clone` allows.\r\n\r\n\r\n\n@monopole I remember that some time ago you've mentioned the possibility of splitting the core capabilities of kustomize which we could embed in kubectl and the remaining bits building on top and shipped as separate kustomize binary. So having read what you said in previous comment I'd be in favor of having slimmer and less-functional but up-to-date kustomize in kubectl. \n@soltysh @dims The go-getter is gone.\r\n\nlooks way better\r\n\r\n![image](https://user-images.githubusercontent.com/23304/107865325-26e71d00-6e33-11eb-8164-9251bef018e7.png)\r\n\nplease see the dependencies CI job for instructions to update a few more things. looks great!\n@monopole one last thing ... looks like the `starlark` stuff added in https://github.com/kubernetes-sigs/kustomize/pull/2470 may be something optional. right? if so could we strip that out too?" 
+ ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "kind-bug", + "area-apiserver", + "area-kubectl", + "lgtm", + "area-cloudprovider", + "sig-storage", + "sig-node", + "sig-api-machinery", + "sig-cluster-lifecycle", + "release-note" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [], + "difficulty": "expert", + "sourceIssue": "https://github.com/kubernetes/kubernetes/pull/98946", + "sourceRepo": "kubernetes/kubernetes", + "reactions": 46, + "comments": 57 + }, + "security": { + "scannedAt": "2026-02-27T17:44:20.140Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-99561-remove-bazel.json b/solutions/cncf-generated/kubernetes/kubernetes-99561-remove-bazel.json new file mode 100644 index 00000000..471fea1d --- /dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-99561-remove-bazel.json @@ -0,0 +1,56 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:14.618Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubernetes: Remove Bazel", + "description": "#### What type of PR is this?\n\n/kind cleanup\n\n#### What this PR does / why we need it:\n\nRemoves the bazel build system, reducing Kubernetes to maintaining one build system for this repo per: https://github.com/kubernetes/enhancements/issues/2420\n\n#### Which issue(s) this PR fixes:\n\nFixes https://github.com/kubernetes/kubernetes/issues/88553\n\n#### Special notes for your reviewer:\n\nThe third commit \"`hack/update-bazel.sh`\" is fully automated, and is an enourmous diff. I recommend reviewing this PR commit by commit, skipping that one. The first commit implements the automation to produce this commit. 
\n\nExplicitly holding this PR, we need to send out a widespread announcement including additional notice of this change and instructions for how contributors should handle this. We need to wait before merging this (will send that tomorrow) given the scope of the change.\n\nThis PR also will not merge due to `pull-kubernetes-bazel-build` and `pull-kubernetes-bazel-test` which still running on all", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/cc @dims @liggitt @spiffxp", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "#### Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.:\r\n\r\n", + "/sig testing\r\n/sig release\r\n(this is going to tag all the SIGs anyhow, since it touches nearly all directories, we should probably remove those?)\n\n/cc @dims @liggitt @spiffxp \n/triage accepted\n/cc\n/cc\nChanges in my last push:\r\n- added `${KUBE_ROOT}/build/root/BUILD.root` to auto-removed files logic in first commit\r\n- regenerated the `hack/update-bazel.sh` commit after this\r\n - `find . -name 'BUILD.*'` now reports nothing.\r\n- added an additional commit to manually drop `OWNERS`, `README.md` from directories that formerly contained bazel files and now contain nothing else, cleaning up these dangling directories.\r\n\r\n\r\n\nhttps://prow.k8s.io/view/gs/kubernetes-jenkins/pr-logs/pull/99561/pull-kubernetes-verify/1366165916764606464\r\none verify failure fixed by the latest commit. (`hack/verify-openapi-spec.sh` was copying `BUILD` specifically into the directory to generate in)\r\n\r\nThe `pull-kubernetes-conformance-image-test` failures are not related to this PR, building the image is flaking on pulling the base image, which is an unrelated issue. 
It's also not a required test.\n@BenTheElder: The following tests **failed**, say `/retest` to rerun all failed tests:\n\nTest name | Commit | Details | Rerun command\n--- | --- | --- | ---\npull-kubernetes-conformance-image-test | d296698b1a0a17bff5c31a49636c0786089e3e05 | [link](https://prow.k8s.io/view/gcs/kubernetes-jenkins/pr-logs/pull/99561/pull-kubernetes-conformance-image-test/1366175762108911616/) | `/test pull-kubernetes-conformance-image-test`\npull-kubernetes-e2e-gci-gce-ipvs | d296698b1a0a17bff5c31a49636c0786089e3e05 | [link](https://prow.k8s.io/view/gcs/kubernetes-jenkins/pr-logs/pull/99561/pull-kubernetes-e2e-gci-gce-ipvs/1366175765393051648/) | `/test pull-kubernetes-e2e-gci-gce-ipvs`\npull-kubernetes-e2e-gce-iscsi | d296698b1a0a17bff5c31a49636c0786089e3e05 | [link](https://prow.k8s.io/view/gcs/kubernetes-jenkins/pr-logs/pull/99561/pull-kubernetes-e2e-gce-iscsi/1366175770992447488/) | `/test pull-kubernetes-e2e-gce-iscsi`\npull-kubernetes-e2e-ubuntu-gce-network-policies | d296698b1a0a17bff5c31a49636c0786089e3e05 | [link](https://prow.k8s.io/view/gs/kubernetes-jenkins/pr-logs/pull/99561/pull-kubernetes-e2e-ubuntu-gce-network-policies/1366175764566773760) | `/test pull-kubernetes-e2e-ubuntu-gce-network-policies`\n\n[Full PR test history](https://prow.k8s.io/pr-history?org=kubernetes&repo=kubernetes&pr=99561). [Your PR dashboard](https://prow.k8s.io/pr?query=is%3Apr%20state%3Aopen%20author%3ABenTheElder). Please help us cut down on flakes by [linking to](https://git.k8s.io/community/contributors/devel/sig-testing/flaky-tests.md#filing-issues-for-flaky-tests) an [open issue](https://github.com/kubernetes/kubernetes/issues?q=is:issue+is:open) when you hit one in your PR.\n\n
\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository. I understand the commands that are listed [here](https://go.k8s.io/bot-commands).\n
\n\n/skip\r\n[bypassing flaky optional tests, we're triggering ~every possible test since this touches every directory]\nreviewed locally since github won't show the full diff\r\n\r\n/approve\r\n\r\nlgtm once the bazel CI jobs are dropped from master\n/skip\npull-kubernetes-verify shows running the conformance verifications:" + ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "area-test", + "priority-critical-urgent", + "sig-network", + "area-kubelet", + "kind-cleanup", + "sig-scalability", + "sig-scheduling", + "area-apiserver", + "area-kubectl", + "lgtm" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubernetes/kubernetes/pull/99561", + "sourceRepo": "kubernetes/kubernetes", + "reactions": 87, + "comments": 27 + }, + "security": { + "scannedAt": "2026-02-27T17:44:14.618Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubescape/kubescape-1148-arm64-release-binaries-for-ci-and-krew.json b/solutions/cncf-generated/kubescape/kubescape-1148-arm64-release-binaries-for-ci-and-krew.json new file mode 100644 index 00000000..3dd75a2a --- /dev/null +++ b/solutions/cncf-generated/kubescape/kubescape-1148-arm64-release-binaries-for-ci-and-krew.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:53.563Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubescape: arm64 release binaries for CI and Krew", + "description": "Resolve #195 #1112 \n## Overview\n\nAdd arm64 release binaries (both Linux and macOS) to release CI: kubescape-arm64-\\, as well as Krew.\n\nmacOS arm64 tests are not available in the CI.\n\nBinaries to try out: https://github.com/HollowMan6/kubescape/releases/tag/v2.2.5", + "type": "troubleshoot", + "status": "completed", + 
"resolution": { + "summary": "## Overview\nWait for new release of the following PR:\n- https://github.com/kubescape/kubescape/pull/1169\n- https://github.com/kubescape/kubescape/pull/1148", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "$ file ./kubescape-arm64-macos-latest version\r\n./kubescape-arm64-macos-latest: Mach-O 64-bit executable arm64\r\n\r\n$ ./kubescape-arm64-macos-latest version\r\nYour current version is: v2.2.5 [git enabled in build: true]" + ] + } + }, + "metadata": { + "tags": [ + "kubescape", + "incubating", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "kubescape" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubescape/kubescape/pull/1148", + "sourceRepo": "kubescape/kubescape", + "reactions": 1, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:44:53.563Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubescape/kubescape-1299-chore-update-docs-build-ps1.json b/solutions/cncf-generated/kubescape/kubescape-1299-chore-update-docs-build-ps1.json new file mode 100644 index 00000000..b9bcdf6f --- /dev/null +++ b/solutions/cncf-generated/kubescape/kubescape-1299-chore-update-docs-build-ps1.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:55.531Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubescape: chore: update docs build.ps1", + "description": "## Overview\n\nAs mentioned in the issue, this PR replaces `build.bat` with `build.ps1`\n\n## Additional Information\n\nThis is a continuation PR of https://github.com/kubescape/kubescape/pull/1279 and these doc changes were mentioned there\n\n## Related issues/PRs:\n\nResolved #1298 \n\n## Checklist before requesting a review\n\n- [x] My code 
follows the style guidelines of this project\n- [x] I have commented on my code, particularly in hard-to-understand areas\n- [x] I have performed a self-review of my code\n- [x] If it is a core feature, I have added thorough tests.\n- [x] New and existing unit tests pass locally with my changes", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@HollowMan6 amazing!\n\nI don't work that often with windows tooling, could you elaborate a bit as to exactly what the problem was and how you were able to fix it and how you got the intuition of the fix?\n\nEven looking at the files changed, I can't seem to figure out exactly what was the problem in the script?\n\nThanks!", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kubescape", + "incubating", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "kubescape" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubescape/kubescape/pull/1299", + "sourceRepo": "kubescape/kubescape", + "reactions": 0, + "comments": 23 + }, + "security": { + "scannedAt": "2026-02-27T17:44:55.531Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubescape/kubescape-1311-feat-improve-pretty-logger.json b/solutions/cncf-generated/kubescape/kubescape-1311-feat-improve-pretty-logger.json new file mode 100644 index 00000000..bb70ba69 --- /dev/null +++ b/solutions/cncf-generated/kubescape/kubescape-1311-feat-improve-pretty-logger.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:54.528Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubescape: feat: improve pretty logger", + "description": "## Overview\n\nThis PR improves the pretty output of the informative statements.\n\n## 
Additional Information\n\nThe spinner package has been added here and integrated with the logger itself, and the redirect output has also been correctly handled.\n\n## How to Test\n\nRun any kubescape command such as `kubescape scan resource.yaml`\n\n## Examples/Screenshots\n\n![image](https://github.com/kubescape/go-logger/assets/81813720/11f33743-1c99-4e24-ad88-d88b6531b220)\n![image](https://github.com/kubescape/kubescape/assets/81813720/fb2f4d00-4e12-4220-a430-affc271308ba)\n\n## Related issues/PRs:\n\nResolved #1294 \nResolved https://github.com/kubescape/go-logger/issues/11\n\n## Checklist before requesting a review\n\n- [x] My code follows the style guidelines of this project\n- [x] I have commented on my code, particularly in hard-to-understand areas\n- [x] I have performed a self-review of my code\n- [x] If it is a core feature, I have added thorough tests.\n- [x] New and existing unit tests pass locally with my chang", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@craigbox I've made all the discussed and required changes and also added new log statements where I could find the need, though feel free to let me know if I've missed any spots and I'll make the updates ;)\n\nAlso I've linked the PR at `go-logger` as well so you can have a look there too.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "=== RUN TestProcessResourcesResult\r\n\tgithub.com/kubescape/kubescape/v2/core/pkg/opaprocessor\tcoverage: 50.5% of statements\r\npanic: test timed out after 10m0s\r\nrunning tests:\r\n\tTestProcessResourcesResult (9m59s)\r\n\r\ngoroutine 56 [running]:\r\ntesting.(*M).startAlarm.func1()\r\n\t/opt/hostedtoolcache/go/1.20.6/x64/src/testing/testing.go:2241 +0x3c5\r\ncreated by time.goFunc\r\n\t/opt/hostedtoolcache/go/1.20.6/x64/src/time/sleep.go:176 +0x32\r\n\r\ngoroutine 1 [chan receive, 9 minutes]:\r\ntesting.(*T).Run(0xc000b26b60, 
{0x478481c?, 0x179d825?}, 0x50da428)\r\n\t/opt/hostedtoolcache/go/1.20.6/x64/src/testing/testing.go:1630 +0x405\r\ntesting.runTests.func1(0x73702c0?)\r\n\t/opt/hostedtoolcache/go/1.20.6/x64/src/testing/testing.go:2036 +0x45\r\ntesting.tRunner(0xc000b26b60, 0xc000abfc80)\r\n\t/opt/hostedtoolcache/go/1.20.6/x64/src/testing/testing.go:1576 +0x10b\r\ntesting.runTests(0xc000b219a0?, {0x70105c0, 0x7, 0x7}, {0xc000ae5f28?, 0x100c000ae5e70?, 0x736e8a0?})\r\n\t/opt/hostedtoolcache/go/1.20.6/x64/src/testing/testing.go:2034 +0x489\r\ntesting.(*M).Run(0xc000b219a0)\r\n\t/opt/hostedtoolcache/go/1.20.6/x64/src/testing/testing.go:1906 +0x63a\r\nmain.main()\r\n\t_testmain.go:92 +0x1c5\r\n\r\ngoroutine 21 [select]:\r\ngithub.com/golang/glog.(*fileSink).flushDaemon(0x736e6b8)\r\n\t/home/runner/go/pkg/mod/github.com/golang/glog@v1.1.1/glog_file.go:346 +0xcd\r\ncreated by github.com/golang/glog.init.1\r\n\t/home/runner/go/pkg/mod/github.com/golang/glog@v1.1.1/glog_file.go:161 +0x145\r\n\r\ngoroutine 5 [select]:\r\ngo.opencensus.io/stats/view.(*worker).start(0xc000602800)\r\n\t/home/runner/go/pkg/mod/go.opencensus.io@v0.24.0/stats/view/worker.go:292 +0xad\r\ncreated by go.opencensus.io/stats/view.init.0\r\n\t/home/runner/go/pkg/mod/go.opencensus.io@v0.24.0/stats/view/worker.go:34 +0x96\r\n\r\ngoroutine 41 [sync.Mutex.Lock, 9 minutes]:\r\nsync.runtime_SemacquireMutex(0x45bd6a0?, 0xd0?, 0xc000c1b780?)\r\n\t/opt/hostedtoolcache/go/1.20.6/x64/src/runtime/sema.go:77 +0x26\r\nsync.(*Mutex).lockSlow(0xc000150f78)\r\n\t/opt/hostedtoolcache/go/1.20.6/x64/src/sync/mutex.go:171 +0x165\r\nsync.(*Mutex).Lock(...)\r\n\t/opt/hostedtoolcache/go/1.20.6/x64/src/sync/mutex.go:90\r\ngithub.com/kubescape/go-logger/prettylogger.(*PrettyLogger).print(0xc000150f60, 0x3, {0x476b097, 0x13}, {0x0, 0x0, 0x0})\r\n\t/home/runner/go/pkg/mod/github.com/kubescape/go-logger@v0.0.16/prettylogger/logger.go:79 +0xe5\r\ngithub.com/kubescape/go-logger/prettylogger.(*PrettyLogger).Info(0xc000ddc800?, {0x476b097?, 
0xc000134008?}, {0x0?, 0x0?, 0x0?})\r\n\t/home/runner/go/pkg/mod/github.com/kubescape/go-logger@v0.0.16/prettylogger/logger.go:58 +0x37\r\ngithub.com/kubescape/kubescape/v2/core/pkg/opaprocessor.(*OPAProcessor).updateResults(0xc000150f40, {0x560c370, 0xc000134008})\r\n\t/home/runner/work/kubescape/kubescape/core/pkg/opaprocessor/processorhandlerutils.go:33 +0x15e\r\ngithub.com/kubescape/kubescape/v2/core/pkg/opaprocessor.TestProcessResourcesResult(0xc000618000?)\r\n\t/home/runner/work/kubescape/kubescape/core/pkg/opaprocessor/processorhandler_test.go:208 +0x952\r\ntesting.tRunner(0xc000618340, 0x50da428)\r\n\t/opt/hostedtoolcache/go/1.20.6/x64/src/testing/testing.go:1576 +0x10b\r\ncreated by testing.(*T).Run\r\n\t/opt/hostedtoolcache/go/1.20.6/x64/src/testing/testing.go:1629 +0x3ea\r\nFAIL\tgithub.com/kubescape/kubescape/v2/core/pkg/opaprocessor\t600.089s" + ] + } + }, + "metadata": { + "tags": [ + "kubescape", + "incubating", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "kubescape" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubescape/kubescape/pull/1311", + "sourceRepo": "kubescape/kubescape", + "reactions": 0, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:44:54.528Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubescape/kubescape-1332-feat-add-kubescape-patch-command.json b/solutions/cncf-generated/kubescape/kubescape-1332-feat-add-kubescape-patch-command.json new file mode 100644 index 00000000..26c75c32 --- /dev/null +++ b/solutions/cncf-generated/kubescape/kubescape-1332-feat-add-kubescape-patch-command.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:50.920Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubescape: feat: add kubescape patch command", + "description": "## 
Overview\nAdd `kubescape patch` command\n\n## Additional Information\n1. The `kubescape patch` command can be used to patch container images with vulnerabilities.\n2. It uses [copa](https://github.com/project-copacetic/copacetic) and [buildkit](https://github.com/moby/buildkit) under the hood for patching the images, and [grype](https://github.com/anchore/grype) as its engine for scanning the images (at the moment)\n3. The detailed documentation for this command can be found [here](https://github.com/anubhav06/kubescape/tree/patch-cmd/cmd/patch#readme).\n\n## TODO\n\n1. Replace `anubhav06/copacetic` with `project-copacetic/copacetic`, when the copa team accepts the kubescape and copa integration support PR.\n\n## Usage\n\n```bash\nsudo buildkitd & \nsudo kubescape patch --image \n```\n> The patch command can also be run without sudo privileges. Refer to the documentation [here](https://github.com/anubhav06/kubescape/tree/patch-cmd/cmd/patch#readme).\n\n## Examples/Screenshots\n1. Run `sudo b", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@anubhav06 This feature looks really cool and works smoothly! I'm very excited for this!\n\nI have some general comments about the feature, which are mostly food for thought:\n\n1. We should add the image name to the `kubescape patch` follow-up step (which is printed at the end)\n2. Maybe we should add to the output some comparison between the old image and the new one, so it will become clear to the user the value he is getting from the `patch` command\n3. Do we need to print the entire copa output? I think is best to have it in debug mode only\n4. We shouldn't use the `-r` flag. We have the `-f` flag, where we can specify JSON output\n5. 
Maybe the commands should be `kubescape image scan` and `kubescape image patch`\n\ncc: @craigbox @dwertent", + "steps": [ + "We should add the image name to the `kubescape patch` follow-up step (which is printed at the end)", + "Maybe we should add to the output some comparison between the old image and the new one, so it will become clear to the user the value he is getting from the `patch` command", + "Do we need to print the entire copa output? I think is best to have it in debug mode only", + "We shouldn't use the `-r` flag. We have the `-f` flag, where we can specify JSON output", + "Maybe the commands should be `kubescape image scan` and `kubescape image patch`" + ], + "codeSnippets": [ + "sudo buildkitd & \r\nsudo kubescape patch --image " + ] + } + }, + "metadata": { + "tags": [ + "kubescape", + "incubating", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "kubescape" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubescape/kubescape/pull/1332", + "sourceRepo": "kubescape/kubescape", + "reactions": 2, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:44:50.921Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubescape/kubescape-1393-use-go-gitlog-as-an-alternative-to-git2go.json b/solutions/cncf-generated/kubescape/kubescape-1393-use-go-gitlog-as-an-alternative-to-git2go.json new file mode 100644 index 00000000..7bda4928 --- /dev/null +++ b/solutions/cncf-generated/kubescape/kubescape-1393-use-go-gitlog-as-an-alternative-to-git2go.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:56.740Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubescape: use go-gitlog as an alternative to git2go", + "description": "@dwertent we should check if this is acceptable, since we require a `git` binary in 
the PATH\n(probably OK to assume if you want to scan a git repo, you should have git installed)\n\nbenchmark shows we allocate more memory, but we're faster:\n```\ngitlog\nBenchmarkBuildCommitMap-8 \t 5\t 206852705 ns/op\t13889608 B/op\t 87819 allocs/op\ngit2go\nBenchmarkBuildCommitMap-8 \t 4\t 310221194 ns/op\t 3428458 B/op\t 144074 allocs/op\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "
\n👇 Click on the image for a new way to code review\n\n\n####\n\n[![Review these changes using an interactive CodeSee Map](https://s3.us-east-2.amazonaws.com/maps.codesee.io/images/github/kubescape/kubescape/1393/ff5e47a3/ce068a1e8e0019e34f643499935c3c517a24c736.svg)](https://app.codesee.io/r/reviews?pr=1393&src=https%3A%2F%2Fgithub.com%2Fkubescape%2Fkubescape)\n\n#### Legend\n\"CodeSee\n
", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "gitlog\r\nBenchmarkBuildCommitMap-8 \t 5\t 206852705 ns/op\t13889608 B/op\t 87819 allocs/op\r\ngit2go\r\nBenchmarkBuildCommitMap-8 \t 4\t 310221194 ns/op\t 3428458 B/op\t 144074 allocs/op", + "❗ Git scan skipped. error: failed to get commit information for file: bla.yaml" + ] + } + }, + "metadata": { + "tags": [ + "kubescape", + "incubating", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "kubescape" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubescape/kubescape/pull/1393", + "sourceRepo": "kubescape/kubescape", + "reactions": 0, + "comments": 26 + }, + "security": { + "scannedAt": "2026-02-27T17:44:56.740Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubescape/kubescape-932-added-override-values-for-helm-feature.json b/solutions/cncf-generated/kubescape/kubescape-932-added-override-values-for-helm-feature.json new file mode 100644 index 00000000..55387d90 --- /dev/null +++ b/solutions/cncf-generated/kubescape/kubescape-932-added-override-values-for-helm-feature.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:51.922Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubescape: added override values for helm feature", + "description": "fixes #901\n\nWith merge of this PR kubescape will support a flag `--helm-value-file` which takes input of path/paths of value file seperated by `,` and override the default value file of helm.\n\nEDITED : \n\nnow the flag takes a string value seperated by space e.g. 
\n`--helm-value-file=path/to/chart1:path/to/value/file1,path/to/value/file2 path/to/chart2:path/to/value/path1.....\" `\n\nApproach:\n\nThe working of this feature is similiar to what `helm/helm` repo handles overriding however due to different structures I could not use those functions. ref https://github.com/helm/helm/blob/main/pkg/cli/values/options.go#L42", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "Update: fixing test cases", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kubescape", + "incubating", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "kubescape" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/kubescape/kubescape/pull/932", + "sourceRepo": "kubescape/kubescape", + "reactions": 1, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:44:51.922Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubestellar/kubestellar-1156-use-realpath-to-see-through-symlinks.json b/solutions/cncf-generated/kubestellar/kubestellar-1156-use-realpath-to-see-through-symlinks.json new file mode 100644 index 00000000..b376667d --- /dev/null +++ b/solutions/cncf-generated/kubestellar/kubestellar-1156-use-realpath-to-see-through-symlinks.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:36.201Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubestellar: 🐛 Use realpath to see through symlinks", + "description": "## Summary\nThis PR injects usage of `realpath` into scripts that navigate from `$something/bin` to `$something/config` or `$something/core-helm-chart` because sometimes that `bin` is a symlink and the `config` or `core-helm-chart` is adjacent to where that symlink 
points.\n\nThis PR also removes a redundant variable holding the `bin` pathname.\n\n## Related issue(s)\n\nFixes #1152", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/retest", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kubestellar", + "sandbox", + "orchestration", + "size-s", + "approved", + "lgtm", + "dco-signoff--yes" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubestellar" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/kubestellar/kubestellar/pull/1156", + "sourceRepo": "kubestellar/kubestellar", + "reactions": 0, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:48:36.202Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubestellar/kubestellar-1667-added-workloadgeneration-field-to-the-downsync-objects-section-.json b/solutions/cncf-generated/kubestellar/kubestellar-1667-added-workloadgeneration-field-to-the-downsync-objects-section-.json new file mode 100644 index 00000000..f245ed4b --- /dev/null +++ b/solutions/cncf-generated/kubestellar/kubestellar-1667-added-workloadgeneration-field-to-the-downsync-objects-section-.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:35.303Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubestellar: 🐛 added `WorkloadGeneration` field to the downsync objects section of PlacementDecision", + "description": "This field is required in order to trigger reconciliation in cases where the list of objects to downsync hasn't changed (their identifiers hasn't changed), but spec of at least one of the objects has changed. 
in such cases we need to trigger distribution of the updates changes.\n\n## Summary\n\n## Related issue(s)\n\nFixes #1671", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/cc @pdettori \n/cc @MikeSpreitzer \n/cc @ezrasilvera \n/cc @vMaroon", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kubestellar", + "sandbox", + "orchestration", + "approved", + "lgtm", + "size-m", + "dco-signoff--yes", + "pluggable-transport" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubestellar" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubestellar/kubestellar/pull/1667", + "sourceRepo": "kubestellar/kubestellar", + "reactions": 1, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:48:35.303Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubestellar/kubestellar-186-added-edge-scheduler-to-quickstart.json b/solutions/cncf-generated/kubestellar/kubestellar-186-added-edge-scheduler-to-quickstart.json new file mode 100644 index 00000000..59181fbb --- /dev/null +++ b/solutions/cncf-generated/kubestellar/kubestellar-186-added-edge-scheduler-to-quickstart.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:39.620Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubestellar: 📖 added edge-scheduler to quickstart", + "description": "Signed-off-by: nitinmewar [nitinmewar28@gmail.com](mailto:nitinmewar28@gmail.com)\n## Summary\nlinked edge schedular docs to Quickstart and added table of content.\n\n## Related issue(s)\n\nFixes #165", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@MikeSpreitzer yes, your right. What is the right way to proceed? 
Do we need a quickstart in root? Or can we use just the one in the docs? I would prefer the docs version.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kubestellar", + "sandbox", + "orchestration", + "needs-rebase", + "ok-to-test" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubestellar" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubestellar/kubestellar/pull/186", + "sourceRepo": "kubestellar/kubestellar", + "reactions": 0, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:48:39.620Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubestellar/kubestellar-254-reconciliation-triggered-by-synctarget.json b/solutions/cncf-generated/kubestellar/kubestellar-254-reconciliation-triggered-by-synctarget.json new file mode 100644 index 00000000..959bc07f --- /dev/null +++ b/solutions/cncf-generated/kubestellar/kubestellar-254-reconciliation-triggered-by-synctarget.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:38.494Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubestellar: ✨ Reconciliation triggered by SyncTarget", + "description": "## Summary\nThis PR implements parts of (those triggered by SyncTarget changes) edge-scheduler's reconciliation.\n\nFixes #246", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/hold", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kubestellar", + "sandbox", + "orchestration", + "approved", + "lgtm" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubestellar" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + 
"sourceIssue": "https://github.com/kubestellar/kubestellar/pull/254", + "sourceRepo": "kubestellar/kubestellar", + "reactions": 0, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:48:38.494Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubestellar/kubestellar-999-seedling-remove-the-dependency-of-core-kcp-io-v1alpha1-logicalcl.json b/solutions/cncf-generated/kubestellar/kubestellar-999-seedling-remove-the-dependency-of-core-kcp-io-v1alpha1-logicalcl.json new file mode 100644 index 00000000..a2b88c62 --- /dev/null +++ b/solutions/cncf-generated/kubestellar/kubestellar-999-seedling-remove-the-dependency-of-core-kcp-io-v1alpha1-logicalcl.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:37.549Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubestellar: :seedling: Remove the dependency of core.kcp.io/v1alpha1/logicalclusters from Syncer", + "description": "## Summary\n\nThis is the PR for #874.\n\n#### What's the changed\n\n- Remove the dependency of kcp logical cluster function from Syncer\n\nI reviewed Syncer code whether Syncer really depends on logical cluster id or not and concluded it's not required. I have removed the dependent codes on the logical cluster stuff from Syncer. As a result, Syncer does no longer require `--from-cluster` option.\n\n#### Verification\n\nUT passed\n```\n$ make test-syncer COUNT=1\ngo test -count 1 `go list ./... | grep \"/pkg/cliplugins\\|/pkg/syncer\"`\n? github.com/kubestellar/kubestellar/pkg/syncer [no test files]\n? github.com/kubestellar/kubestellar/pkg/syncer/clientfactory [no test files]\n? github.com/kubestellar/kubestellar/pkg/syncer/shared [no test files]\n? 
github.com/kubestellar/kubestellar/pkg/syncer/syncers [no test files]\nok github.com/kubestellar/kubestellar/pkg/cliplugins/kubestellar/syncer-gen 0.952s\nok github.com/kubestellar/kubestellar/pkg/syncer/control", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## Summary\nThis PR builds on #999 by changing the prep-for-syncer script to use an image built from that PR.\n\nThis is for testing. DO NOT MERGE, we will want to actually use an image built from `main` after #999 merges.\n\n## Related issue(s)\n\nFixes #\n\n/cc @yana1205 \n/cc @clubanderson", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "$ make test-syncer COUNT=1\r\ngo test -count 1 `go list ./... | grep \"/pkg/cliplugins\\|/pkg/syncer\"`\r\n? github.com/kubestellar/kubestellar/pkg/syncer [no test files]\r\n? github.com/kubestellar/kubestellar/pkg/syncer/clientfactory [no test files]\r\n? github.com/kubestellar/kubestellar/pkg/syncer/shared [no test files]\r\n? github.com/kubestellar/kubestellar/pkg/syncer/syncers [no test files]\r\nok github.com/kubestellar/kubestellar/pkg/cliplugins/kubestellar/syncer-gen 0.952s\r\nok github.com/kubestellar/kubestellar/pkg/syncer/controller 18.689s", + "$ make e2e-test-kubestellar-syncer \r\nrm -rf /Users/yana/git/trl/kcp/kcp-edge-mc/.kcp/etcd-server\r\nrm -rf /Users/yana/git/trl/kcp/kcp-edge-mc/.kcp/.admin-token-store /Users/yana/git/trl/kcp/kcp-edge-mc/.kcp/admin.kubeconfig\r\nrm -rf /Users/yana/git/trl/kcp/kcp-edge-mc/.kcp/apiserver.* /Users/yana/git/trl/kcp/kcp-edge-mc/.kcp/sa.key\r\nexport PATH=/Users/yana/git/trl/kcp/kcp-edge-mc/kcp/bin:$PATH && \\\r\n kcp start --root-directory=/Users/yana/git/trl/kcp/kcp-edge-mc/.kcp > /Users/yana/git/trl/kcp/kcp-edge-mc/.kcp/kcp.log 2>&1 & PID=$! && echo \"PID $PID\" && \\\r\n trap 'kill -TERM $PID' TERM INT EXIT && \\\r\n while [ ! 
-f \"/Users/yana/git/trl/kcp/kcp-edge-mc/.kcp/admin.kubeconfig\" ]; do sleep 1; echo \"kcp is not ready. wait for 1s...\";done && \\\r\n echo 'Starting test(s)' && \\\r\n NO_GORUN=1 GOOS=darwin GOARCH=arm64 \\\r\n go test -race ./test/e2e/kubestellar-syncer/... \\\r\n --kcp-kubeconfig /Users/yana/git/trl/kcp/kcp-edge-mc/.kcp/admin.kubeconfig --suites kubestellar-syncer \\\r\n\r\nPID 6248\r\nkcp is not ready. wait for 1s...\r\nkcp is not ready. wait for 1s...\r\nkcp is not ready. wait for 1s...\r\nStarting test(s)\r\nok github.com/kubestellar/kubestellar/test/e2e/kubestellar-syncer 258.144s" + ] + } + }, + "metadata": { + "tags": [ + "kubestellar", + "sandbox", + "orchestration", + "approved", + "lgtm", + "size-l", + "dco-signoff--yes" + ], + "category": "troubleshooting", + "cncfProjects": [ + "kubestellar" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/kubestellar/kubestellar/pull/999", + "sourceRepo": "kubestellar/kubestellar", + "reactions": 0, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:48:37.549Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubevirt/kubevirt-10015-usb-host-pass-through.json b/solutions/cncf-generated/kubevirt/kubevirt-10015-usb-host-pass-through.json new file mode 100644 index 00000000..c7ef44ce --- /dev/null +++ b/solutions/cncf-generated/kubevirt/kubevirt-10015-usb-host-pass-through.json @@ -0,0 +1,56 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:02.137Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubevirt: USB host pass through", + "description": "**What this PR does / why we need it**:\nThis PR allows USB devices that are plugged to the cluster's Nodes to be allocated to VMs running in said Nodes.\n\n### Staring with main components and a simple use case\n\nA `cluster admin` can define USB devices he 
wants to expose to VM using KubeVirt CRD, under permitted host devices\nA new CRD called NodeConfig is introduce to allow requiring USB devices. Note that [enabling](https://kubevirt.io/user-guide/operations/activating_feature_gates/) `HostDevices` featureGates is a requirement.\n\n```yaml\napiVersion: kubevirt.io/v1\nkind: KubeVirt\nmetadata:\n name: kubevirt\n namespace: kubevirt\nspec:\n configuration:\n permittedHostDevices:\n usb:\n - resourceName: kubevirt.io/storage\n selectors:\n - vendor: \"46f4\"\n product: \"0001\"\n developerConfiguration: \n featureGates:\n - HostDevices\n```\n\nWith the configuration above, USB devices of `46f4:0001` _(vendor:product)_ will be exposed under the reso", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This is possible since:\n https://github.com/kubevirt/kubevirtci/pull/996\n\nThe follow CI would benefit from it:\n https://github.com/kubevirt/kubevirt/pull/10015", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "apiVersion: kubevirt.io/v1\r\nkind: KubeVirt\r\nmetadata:\r\n name: kubevirt\r\n namespace: kubevirt\r\nspec:\r\n configuration:\r\n permittedHostDevices:\r\n usb:\r\n - resourceName: kubevirt.io/storage\r\n selectors:\r\n - vendor: \"46f4\"\r\n product: \"0001\"\r\n developerConfiguration: \r\n featureGates:\r\n - HostDevices", + "spec:\r\n domain:\r\n devices:\r\n hostDevices:\r\n - deviceName: kubevirt.io/storage\r\n name: my-storage-usb-devices", + "(toso) $ export KUBEVIRT_PROVIDER=k8s-1.26-centos9\r\n(toso) $ export KUBEVIRT_TAG=latest\r\n(toso) $ export KUBEVIRT_PROVIDER_EXTRA_ARGS=\"--usb 20M --usb 30M --usb 40M\"\r\n(toso) $ make cluster-up\r\n...\r\n(toso) $ ./cluster-up/ssh.sh node01\r\n(node01) $ dmesg | grep -i idVendor=46f4\r\n[ 1.204437] usb 4-1: New USB device found, idVendor=46f4, idProduct=0001, bcdDevice= 0.00\r\n[ 1.204501] usb 3-1: New USB device found, idVendor=46f4, 
idProduct=0001, bcdDevice= 0.00\r\n[ 1.330900] usb 4-2: New USB device found, idVendor=46f4, idProduct=0001, bcdDevice= 0.00" + ] + } + }, + "metadata": { + "tags": [ + "kubevirt", + "incubating", + "app-definition", + "release-note", + "size-xxl", + "kind-api-change", + "lgtm", + "approved", + "dco-signoff--yes" + ], + "category": "workloads", + "cncfProjects": [ + "kubevirt" + ], + "targetResourceKinds": [ + "Namespace", + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubevirt/kubevirt/pull/10015", + "sourceRepo": "kubevirt/kubevirt", + "reactions": 5, + "comments": 40 + }, + "security": { + "scannedAt": "2026-02-27T17:45:02.137Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubevirt/kubevirt-10244-virtctl-adm-and-logverbosity-command-initial-commit.json b/solutions/cncf-generated/kubevirt/kubevirt-10244-virtctl-adm-and-logverbosity-command-initial-commit.json new file mode 100644 index 00000000..15683edc --- /dev/null +++ b/solutions/cncf-generated/kubevirt/kubevirt-10244-virtctl-adm-and-logverbosity-command-initial-commit.json @@ -0,0 +1,55 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:14.491Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubevirt: virtctl: adm and logverbosity command initial commit", + "description": "**What this PR does / why we need it**:\n\nAdd `adm` subcommand under `virtctl`, and `log-verbosity` subcommand under `adm`.\n\n`virtctl adm log-verbosity [flags]`\n\nThe `log-verbosity` command is:\n- To show the log verbosity of one or more components (when the log verbosity is unattended in the KubeVirt CR, show the default verbosity)\n- To set the log verbosity of one or more components\n- To reset the log verbosity of all components (empty the log verbosity field, which means reset to the default verbosity)\n\nNote:\n- The components are ``\n- 
`Show` and `Set`/`Reset` cannot coexist\n- The verbosity value must be `0-9`. The default cluster config is normally `2`.\n- The verbosity value `10` is accepted but the operation is `show` instead of `set` (e.g. `--virt-api=10` = `--virt-api`).\n- Flag syntax must be `flag=arg` (`flag arg` not supported)\n\nFlags:\n```\n --all uint[=10] show/set all component log verbo", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/cc @victortoso", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "--all uint[=10] show/set all component log verbosity (0-9) (default 11)\r\n-h, --help help for log-verbosity\r\n --reset reset log verbosity to the default verbosity (2) (empty the log verbosity)\r\n --virt-api uint[=10] show/set virt-api log verbosity (0-9) (default 11)\r\n --virt-controller uint[=10] show/set virt-controller log verbosity (0-9) (default 11)\r\n --virt-handler uint[=10] show/set virt-handler log verbosity (0-9) (default 11)\r\n --virt-launcher uint[=10] show/set virt-launcher log verbosity (0-9) (default 11)\r\n --virt-operator uint[=10] show/set virt-operator log verbosity (0-9) (default 11)", + "# reset (to default) log-verbosity for all components\r\nvirtctl adm logVerbosity –reset\r\n\r\n# show log-verbosity for all components:\r\nvirtctl adm log-verbosity --all\r\n# set log-verbosity to 3 for all components:\r\nvirtctl adm log-verbosity --all=3\r\n \r\n# show log-verbosity for virt-handler:\r\nvirtctl adm log-verbosity --virt-handler\r\n# set log-verbosity to 7 for virt-handler:\r\nvirtctl adm log-verbosity --virt-handler=7\r\n\r\n# show log-verbosity for virt-handler and virt-launcher\r\nvirtctl adm log-verbosity --virt-handler --virt-launcher\r\n# set log-verbosity for virt-handler to 7 and virt-launcher to 3\r\nvirtctl adm log-verbosity --virt-handler=7 --virt-launcher=3", + "# reset all components to default besides virt-handler which is 
7\r\nvirtctl adm log-verbosity --reset --virt-handler=7\r\n \r\n# set all components to 3 besides virt-handler which is 7\r\nvirtctl adm log-verbosity --all=3 --virt-handler=7" + ] + } + }, + "metadata": { + "tags": [ + "kubevirt", + "incubating", + "app-definition", + "size-xl", + "release-note", + "lgtm", + "approved", + "ok-to-test", + "dco-signoff--yes" + ], + "category": "workloads", + "cncfProjects": [ + "kubevirt" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubevirt/kubevirt/pull/10244", + "sourceRepo": "kubevirt/kubevirt", + "reactions": 3, + "comments": 109 + }, + "security": { + "scannedAt": "2026-02-27T17:45:14.491Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubevirt/kubevirt-10923-provide-way-to-customize-generated-multus-pod-annotation.json b/solutions/cncf-generated/kubevirt/kubevirt-10923-provide-way-to-customize-generated-multus-pod-annotation.json new file mode 100644 index 00000000..20c4dbfd --- /dev/null +++ b/solutions/cncf-generated/kubevirt/kubevirt-10923-provide-way-to-customize-generated-multus-pod-annotation.json @@ -0,0 +1,53 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:16.195Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubevirt: Provide way to customize generated Multus pod annotation", + "description": "**What this PR does / why we need it**:\nWhen virt-controller is generating the Multus pod annotation, several properties can be set:\n- `name`\n- `namespace`\n- `interface`\n- `mac` _(optional)_\n\nThis proposal enables the customization of the Multus annotation.\n\n**Which issue(s) this PR fixes**:\nFixes #4564\n\n**Checklist**\n- [x] Design: A [design document](https://github.com/kubevirt/community/tree/main/design-proposals) was considered and is present (link) or **not required**\n- [x] PR: The PR 
description is expressive enough and will help future contributors\n- [x] Code: [Write code that humans can understand](https://en.wikiquote.org/wiki/Martin_Fowler#code-for-humans) and [Keep it simple](https://en.wikipedia.org/wiki/KISS_principle)\n- [x] Refactor: You have [left the code cleaner than you found it (Boy Scout Rule)](https://learning.oreilly.com/library/view/97-things-every/9780596809515/ch08.html)\n- [x] Upgrade: Impact of this change on upgrade flows was considered and addressed if requir", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Documentation update tied to pull request: [Provide way to customize generated Multus pod annotation](https://github.com/kubevirt/kubevirt/pull/10923)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kubevirt", + "incubating", + "app-definition", + "size-l", + "sig-network", + "sig-storage", + "release-note", + "lifecycle-rotten", + "kind-api-change", + "needs-ok-to-test", + "dco-signoff--yes" + ], + "category": "workloads", + "cncfProjects": [ + "kubevirt" + ], + "targetResourceKinds": [ + "Pod", + "Namespace" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/kubevirt/kubevirt/pull/10923", + "sourceRepo": "kubevirt/kubevirt", + "reactions": 3, + "comments": 24 + }, + "security": { + "scannedAt": "2026-02-27T17:45:16.195Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubevirt/kubevirt-11256-rpm-update-virtualization-packages.json b/solutions/cncf-generated/kubevirt/kubevirt-11256-rpm-update-virtualization-packages.json new file mode 100644 index 00000000..f1a015f9 --- /dev/null +++ b/solutions/cncf-generated/kubevirt/kubevirt-11256-rpm-update-virtualization-packages.json @@ -0,0 +1,54 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:04.399Z", + "exportedBy": 
"cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubevirt: rpm: Update virtualization packages", + "description": "### What this PR does\n\nUpdate virtualization packages. Specifically:\n\n* QEMU (8.0.0 → 8.2.0)\n* libvirt (9.5.0 → 10.0.0)\n* SeaBIOS (1.16.1 → 1.16.3)\n* EDKII (20230524 → 20231122)\n* passt (20230818 → 20231204)\n* virtiofsd (1.7.2 → 1.1.10.1)\n* guestfs-tools (1.50.1 → 1.51.6)\n\nFixes #10208 by adding the `usb-redir` device on Arm.\n\n### Release note\n```release-note\nThis version of KubeVirt includes upgraded virtualization technology based on libvirt 10.0.0 and QEMU 8.2.0.\nEach new release of libvirt and QEMU contains numerous improvements and bug fixes.\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "**What this PR does / why we need it**:\nEnable building and running kubevirt on IBM Z Platform.\n\nThe Purpose of this Draft PR is not to merge it as is, but rather have a discussion about the Changes and start gathering Feedback from the community.\n\n**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\nFixes #\n\n**Special notes for your reviewer**:\nSince this is a WIP PR, here are my open TODOs for the PR:\n\nTODOs:\n- [x] Test all changed build scripts on s390x\n- [x] Rework Graphic device\n- [x] Disable ACPI by default on s390x\n- [x] Add s390x to tests\n- [x] Enable cross-compiling\n- [x] Do not build every container image on s390x, as some base images are not available\n- [x] Cleanup commits (Squash, wrong author etc.)\n- [x] Rebase PR on main\n- [ ] (Optional) Split PR into multiple smaller PRs\n- [x] Write Release Note\n- [x] Checklist below\n\n**Checklist**\n\nThis checklist is not enforcing, but it's a reminder of items that could be relevant to every PR.\nApprovers are expected to review this list.\n\n- [x] Design: A [design 
document](https://github.com/kubevirt/community/tree/main/design-proposals) was considered and is present (link) or not required\n- [x] PR: The PR description is expressive enough and will help future contributors\n- [x] Code: [Write code that humans can understand](https://en.wikiquote.org/wiki/Martin_Fowler#code-for-humans) and [Keep it simple](https://en.wikipedia.o", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "\r\n\r\n**What this PR does / why we need it**:\r\nEnable building and running kubevirt on IBM Z Platform.\r\n\r\nThe Purpose of this Draft PR is not to merge it as is, but rather have a discussion about the Changes and start gathering Feedback from the community.\r\n\r\n**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\r\nFixes #\r\n\r\n**Special notes for your reviewer**:\r\nSince this is a WIP PR, here are my open TODOs for the PR:\r\n\r\nTODOs:\r\n- [x] Test all changed build scripts on s390x\r\n- [x] Rework Graphic device\r\n- [x] Disable ACPI by default on s390x\r\n- [x] Add s390x to tests\r\n- [x] Enable cross-compiling\r\n- [x] Do not build every container image on s390x, as some base images are not available\r\n- [x] Cleanup commits (Squash, wrong author etc.)\r\n- [x] Rebase PR on main\r\n- [ ] (Optional) Split PR into multiple smaller PRs\r\n- [x] Write Release Note\r\n- [x] Checklist below\r\n\r\n**Checklist**\r\n\r\nThis checklist is not enforcing, but it's a reminder of items that could be relevant to every PR.\r\nApprovers are expected to review this list.\r\n\r\n- [x] Design: A [design document](https://github.com/kubevirt/community/tree/main/design-proposals) was considered and is present (link) or not required\r\n- [x] PR: The PR description is expressive enough and will help future contributors\r\n- [x] Code: [Write code that humans can 
understand](https://en.wikiquote.org/wiki/Martin_Fowler#code-for-humans) and [Keep it simple](https://en.wikipedia.org/wiki/KISS_principle)\r\n- [x] Refactor: You have [left the code cleaner than you found it (Boy Scout Rule)](https://learning.oreilly.com/library/view/97-things-every/9780596809515/ch08.html)\r\n- [x] Upgrade: Impact of this change on upgrade flows was considered and addressed if required\r\n- [x] Testing: New code requires [new unit tests](https://github.com/kubevirt/kubevirt/blob/main/docs/reviewer-guide.md#when-is-a-pr-good-enough). New features and bug fixes require at least on e2e test\r\n- [x] Documentation: A [user-guide update](https://github.com/kubevirt/user-guide/) was considered and is present (link) or not required. You want a user-guide update if it's a user facing feature / API change.\r\n- [x] Community: Announcement to [kubevirt-dev](https://groups.google.com/g/kubevirt-dev/) was considered\r\n\r\n**Release note**:\r\n", + "It seems however that although the migration is in a Running phase, the new virt-launcher instance is already ready and running:", + "However, when I glanced over virt-handler's logs I saw many of these:" + ] + } + }, + "metadata": { + "tags": [ + "kubevirt", + "incubating", + "app-definition", + "sig-network", + "release-note", + "size-xxl", + "lgtm", + "approved", + "dco-signoff--yes", + "sig-buildsystem" + ], + "category": "workloads", + "cncfProjects": [ + "kubevirt" + ], + "targetResourceKinds": [], + "difficulty": "expert", + "sourceIssue": "https://github.com/kubevirt/kubevirt/pull/11256", + "sourceRepo": "kubevirt/kubevirt", + "reactions": 5, + "comments": 72 + }, + "security": { + "scannedAt": "2026-02-27T17:45:04.399Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubevirt/kubevirt-12793-usbredir-test-add-ginkgorecover-for-goroutines.json 
b/solutions/cncf-generated/kubevirt/kubevirt-12793-usbredir-test-add-ginkgorecover-for-goroutines.json new file mode 100644 index 00000000..f086e064 --- /dev/null +++ b/solutions/cncf-generated/kubevirt/kubevirt-12793-usbredir-test-add-ginkgorecover-for-goroutines.json @@ -0,0 +1,55 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:06.002Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubevirt: usbredir test: Add GinkgoRecover for goroutines", + "description": "### What this PR does\nFixes https://github.com/kubevirt/kubevirt/pull/12640#issuecomment-2343226644\n\nFixes #12787\n\n### Special notes for your reviewer\n\n### Release note\n\n```release-note\nNONE\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "### What this PR does\n\n**Before this PR:**\n\nWhen a Virtual Machine Instance (VMI) is shut down from within the guest OS, the VMI transitions to the `Succeeded` state and remains there, which triggers a Pod Disruption Budget (PDB) alert.\n\n**After this PR:**\n\nThe PDB associated with the VMI will be automatically deleted when the VMI shutdown is detected. This prevents the PDB alert from firing, eliminating unnecessary alerts.\n\n### Why we need it and why it was done this way\n\nWithout this fix, shutting down a VMI from within the guest OS leads to a PDB alert, which could mislead users about the state of their VMs.\n\n**Approach:**\n\nThe chosen solution deletes the PDB when the VMI reaches the `Succeeded` phase. 
This allows the VMI to remain available for inspection by the user, including status, conditions, or virt-launcher pod logs.\n\n### Special notes for your reviewer\n\nThe end-to-end test could potentially be improved.\n\n### Checklist\n\nThis checklist is not enforcing, but it's a reminder of items that could be relevant to every PR.\nApprovers are expected to review this list.\n\n- [ ] Design: A [design document](https://github.com/kubevirt/community/tree/main/design-proposals) was considered and is present (link) or not required\n- [x] PR: The PR description is expressive enough and will help future contributors\n- [x] Code: [Write code that humans can understand](https://en.wikiquote.org/wiki/Martin_Fowler#code-for-humans) and [Keep it simple](https://en.wikipedia.org/wiki/KISS_princi", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "### What this PR does\r\n\r\n**Before this PR:**\r\n\r\nWhen a Virtual Machine Instance (VMI) is shut down from within the guest OS, the VMI transitions to the `Succeeded` state and remains there, which triggers a Pod Disruption Budget (PDB) alert.\r\n\r\n**After this PR:**\r\n\r\nThe PDB associated with the VMI will be automatically deleted when the VMI shutdown is detected. This prevents the PDB alert from firing, eliminating unnecessary alerts.\r\n\r\n### Why we need it and why it was done this way\r\n\r\nWithout this fix, shutting down a VMI from within the guest OS leads to a PDB alert, which could mislead users about the state of their VMs.\r\n\r\n**Approach:**\r\n\r\nThe chosen solution deletes the PDB when the VMI reaches the `Succeeded` phase. 
This allows the VMI to remain available for inspection by the user, including status, conditions, or virt-launcher pod logs.\r\n\r\n### Special notes for your reviewer\r\n\r\nThe end-to-end test could potentially be improved.\r\n\r\n### Checklist\r\n\r\nThis checklist is not enforcing, but it's a reminder of items that could be relevant to every PR.\r\nApprovers are expected to review this list.\r\n\r\n- [ ] Design: A [design document](https://github.com/kubevirt/community/tree/main/design-proposals) was considered and is present (link) or not required\r\n- [x] PR: The PR description is expressive enough and will help future contributors\r\n- [x] Code: [Write code that humans can understand](https://en.wikiquote.org/wiki/Martin_Fowler#code-for-humans) and [Keep it simple](https://en.wikipedia.org/wiki/KISS_principle)\r\n- [x] Refactor: You have [left the code cleaner than you found it (Boy Scout Rule)](https://learning.oreilly.com/library/view/97-things-every/9780596809515/ch08.html)\r\n- [ ] Upgrade: Impact of this change on upgrade flows was considered and addressed if required\r\n- [ ] Testing: New code requires [new unit tests](https://github.com/kubevirt/kubevirt/blob/main/docs/reviewer-guide.md#when-is-a-pr-good-enough). New features and bug fixes require at least one e2e test\r\n- [ ] Documentation: A [user-guide update](https://github.com/kubevirt/user-guide/) was considered and is present (link) or not required. 
Consider a user-guide update if it's a user-facing feature / API change.\r\n- [ ] Community: Announcement to [kubevirt-dev](https://groups.google.com/g/kubevirt-dev/) was considered\r\n\r\n### Release note", + "/kind flake\n/lgtm\r\n@dhiller @xpivarc nice find\n/approve\n[APPROVALNOTIFIER] This PR is **APPROVED**\n\nThis pull-request has been approved by: *acardace*\n\nThe full list of commands accepted by this bot can be found [here](https://go.k8s.io/bot-commands?repo=kubevirt%2Fkubevirt).\n\nThe pull request process is described [here](https://git.k8s.io/community/contributors/guide/owners.md#the-code-review-process)\n\n
\nNeeds approval from an approver in each of these files:\n\n- ~~[tests/OWNERS](https://github.com/kubevirt/kubevirt/blob/main/tests/OWNERS)~~ [acardace]\n\nApprovers can indicate their approval by writing `/approve` in a comment\nApprovers can cancel approval by writing `/approve cancel` in a comment\n
\n\nRequired labels detected, running phase 2 presubmits:\n/test pull-kubevirt-e2e-windows2016\n/test pull-kubevirt-e2e-kind-1.27-vgpu\n/test pull-kubevirt-e2e-kind-sriov\n/test pull-kubevirt-e2e-k8s-1.30-ipv6-sig-network\n/test pull-kubevirt-e2e-k8s-1.29-sig-network\n/test pull-kubevirt-e2e-k8s-1.29-sig-storage\n/test pull-kubevirt-e2e-k8s-1.29-sig-compute\n/test pull-kubevirt-e2e-k8s-1.29-sig-operator\n/test pull-kubevirt-e2e-k8s-1.30-sig-network\n/test pull-kubevirt-e2e-k8s-1.30-sig-storage\n/test pull-kubevirt-e2e-k8s-1.30-sig-compute\n/test pull-kubevirt-e2e-k8s-1.30-sig-operator\n\n/kind failing-test\r\n/priority critical-urgent\nref: https://github.com/kubevirt/kubevirt/issues/12787\nWow! Are the usbredir tests broken? https://prow.ci.kubevirt.io/view/gs/kubevirt-prow/pr-logs/pull/kubevirt_kubevirt/12793/pull-kubevirt-check-tests-for-flakes/1833816822990770176\n> Wow! Are the usbredir tests broken? https://prow.ci.kubevirt.io/view/gs/kubevirt-prow/pr-logs/pull/kubevirt_kubevirt/12793/pull-kubevirt-check-tests-for-flakes/1833816822990770176\r\n\r\nThey are flaky and it is good that the lane detects it as well\nWe had [this check ](https://github.com/kubevirt/kubevirt/pull/11643#discussion_r1621290531)in the PR that introduced but it wasn't failing at that time. Not sure what has changed?\nI honestly think it is fine to have GinkoRecover() in tests, so I'll also\r\n/lgtm\n/override pull-kubevirt-e2e-k8s-1.29-sig-storage\r\n\r\nUnrelated to this change.\n@dhiller: Overrode contexts on behalf of dhiller: pull-kubevirt-e2e-k8s-1.29-sig-storage\n\n
\n\nIn response to [this](https://github.com/kubevirt/kubevirt/pull/12793#issuecomment-2343587089):\n\n>/override pull-kubevirt-e2e-k8s-1.29-sig-storage\r\n>\r\n>Unrelated to this change.\n\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes-sigs/prow](https://github.com/kubernetes-sigs/prow/issues/new?title=Prow%20issue:) repository.\n
\n> /override pull-kubevirt-e2e-k8s-1.29-sig-storage\r\n> \r\n> Unrelated to this change.\r\n\r\nDefinitely unrelated, I am seeing that the guest-console-log container exited for some reason:\r\nhttps://storage.googleapis.com/kubevirt-prow/pr-logs/pull/kubevirt_kubevirt/12793/pull-kubevirt-e2e-k8s-1.29-sig-storage/1833818950031380480/artifacts/k8s-reporter/1/1_pods.log\r\nBut don't see anything in the log/yaml" + ] + } + }, + "metadata": { + "tags": [ + "kubevirt", + "incubating", + "app-definition", + "priority-critical-urgent", + "size-xs", + "release-note-none", + "lgtm", + "approved", + "dco-signoff--yes", + "sig-testing", + "kind-flake", + "kind-failing-test" + ], + "category": "workloads", + "cncfProjects": [ + "kubevirt" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/kubevirt/kubevirt/pull/12793", + "sourceRepo": "kubevirt/kubevirt", + "reactions": 4, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:45:06.002Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubevirt/kubevirt-13690-bug-fix-add-machine-type-to-nodeselector-to-prevent-breaking-chan.json b/solutions/cncf-generated/kubevirt/kubevirt-13690-bug-fix-add-machine-type-to-nodeselector-to-prevent-breaking-chan.json new file mode 100644 index 00000000..3c91722e --- /dev/null +++ b/solutions/cncf-generated/kubevirt/kubevirt-13690-bug-fix-add-machine-type-to-nodeselector-to-prevent-breaking-chan.json @@ -0,0 +1,55 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:07.394Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubevirt: bug-fix: add machine type to `NodeSelector` to prevent breaking changes on unsupported nodes ", + "description": "### What this PR does\nBefore this PR:\nNodes labeled by the node labeller did not include supported machine types. 
As a result, a Virtual Machine (VM) with a specific machine type could be scheduled on a node that does not support it. This would cause the VM launcher to fail.\n\nAfter this PR:\n* Nodes are now labeled by the node-labeller with all supported machine types available on each node.\n* When `virt-launcher` pods being created during vmi starts or during migration, they will have a label in the `NodeSelector` indicating their specific machine type, which will allow the vms to be scheduled only on supported nodes, preventing breaking changes. \n\nFixes #13664\n\n### Why we need it and why it was done in this way\nThe following tradeoffs were made:\n\nThe following alternatives were considered:\n\nLinks to places where the discussion took place: \n\n### Special notes for your reviewer\n\n### Checklist\n\nThis checklist is not enforcing, but it's a reminder of items that could be relevant to every ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/cc", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kubevirt", + "incubating", + "app-definition", + "kind-bug", + "kind-enhancement", + "size-l", + "area-launcher", + "area-controller", + "area-handler", + "release-note", + "kind-api-change", + "lgtm", + "approved" + ], + "category": "workloads", + "cncfProjects": [ + "kubevirt" + ], + "targetResourceKinds": [ + "Pod", + "Node" + ], + "difficulty": "expert", + "sourceIssue": "https://github.com/kubevirt/kubevirt/pull/13690", + "sourceRepo": "kubevirt/kubevirt", + "reactions": 4, + "comments": 50 + }, + "security": { + "scannedAt": "2026-02-27T17:45:07.395Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubevirt/kubevirt-13749-fix-virt-controller-prevent-exec-probes-to-be-wrapped-twice.json 
b/solutions/cncf-generated/kubevirt/kubevirt-13749-fix-virt-controller-prevent-exec-probes-to-be-wrapped-twice.json new file mode 100644 index 00000000..3b7ce573 --- /dev/null +++ b/solutions/cncf-generated/kubevirt/kubevirt-13749-fix-virt-controller-prevent-exec-probes-to-be-wrapped-twice.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:18.487Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubevirt: fix(virt-controller): prevent exec probes to be wrapped twice", + "description": "### What this PR does\nBefore this PR:\n* There's an unidentified \"double reconcile\" and/or kind of race condition that is wrapping the exec command twice when processing a VMI.\n* Wrapping a `virt-probe` inside another `virt-probe` command definitely breaks the VMI and then requires a manual operation to fix it.\n\nAfter this PR:\n* wrapping function now verifies that VMI probe exec command is not already wrapped before wrapping it.\n\n**Fixes #11755**\n\n### Why we need it and why it was done in this way\nThe following tradeoffs were made:\n* _none_\n\n### Checklist\n\nThis checklist is not enforcing, but it's a reminder of items that could be relevant to every PR.\nApprovers are expected to review this list.\n\n- [ ] Design: A [design document](https://github.com/kubevirt/community/tree/main/design-proposals) was considered and is present (link) or not required\n- [ ] PR: The PR description is expressive enough and will help future contributors\n- [ ] Code: [Write code that humans can understand](https", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/ok-to-test", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kubevirt", + "incubating", + "app-definition", + "kind-bug", + "size-m", + "area-controller", + "area-virtctl", + "release-note-none", + 
"lgtm", + "approved", + "dco-signoff--yes", + "sig-compute" + ], + "category": "workloads", + "cncfProjects": [ + "kubevirt" + ], + "targetResourceKinds": [], + "difficulty": "expert", + "sourceIssue": "https://github.com/kubevirt/kubevirt/pull/13749", + "sourceRepo": "kubevirt/kubevirt", + "reactions": 3, + "comments": 35 + }, + "security": { + "scannedAt": "2026-02-27T17:45:18.487Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubevirt/kubevirt-2298-podsecuritypolicy-support-added-when-admission-controller-is-enabl.json b/solutions/cncf-generated/kubevirt/kubevirt-2298-podsecuritypolicy-support-added-when-admission-controller-is-enabl.json new file mode 100644 index 00000000..2e6e29d8 --- /dev/null +++ b/solutions/cncf-generated/kubevirt/kubevirt-2298-podsecuritypolicy-support-added-when-admission-controller-is-enabl.json @@ -0,0 +1,53 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:09.655Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubevirt: PodSecurityPolicy Support added when admission controller is enabled", + "description": "**What this PR does / why we need it**:\nPodSecurityPolicy support is required when admission controller is enabled.\nIf admission controller is enabled, virt-handler couldn't be started without proper pod security defined.\n\nTo fix this problem, following logic added:\n1, defined PodSecurityPolicy kubevirt-privileged-psp\n2, role kubevirt-handler could use psp \"Kubevirt-privileged-psp\"\n3, role kubevirt-operator could write/read PodSecurityPolicy \n\n**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\nFixes #2221\n\n**Special notes for your reviewer**:\nPodSecurityPolicy \"kubevirt-privileged-psp\" is created.\nAttachment is detailed 
information.\n\n[psp.yaml.txt](https://github.com/kubevirt/kubevirt/files/3197582/psp.yaml.txt)\n\n**Release note**:\n\n```release-note\nNone\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@yu2003w I will definitely have a look. We are on kubecon this week though. Can have a look on Monday. @davidvossel do you have some bandwidth?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Hello contributor, thanks for submitting a PR for this project! \n\nI am the bot who triggers \"standard-CI\" builds for this project.\nAs a security measure, I will not run automated tests on PRs that are not from white-listed contributors.\n\nIn order to allow automated tests to run, please ask one of the project maintainers to review the code and then do one of the following:\n\n1. Type `ci test please` on this PR to trigger automated tests for it.\n2. Type `ci add to whitelist` on this PR to trigger automated tests for it and also add you to the contributor white-list so that your future PRs will be tested automatically. ( keep in mind this list might be overwritten if the job XML is refreshed, for permanent whitelisting, please follow #3 option )\n3. If you are planning to contribute to more than one project, maybe it's better to ask them to add you to the project organization, so you'll be able to run tests for all the organization's projects.\nHi @yu2003w. Thanks for your PR.\n\nI'm waiting for a [kubevirt](https://github.com/orgs/kubevirt/people) member to verify that this patch is reasonable to test. If it is, they should reply with `/ok-to-test` on its own line. Until that is done, I will not automatically test new commits in this PR, but the usual testing commands by org members will still work. 
Regular contributors should [join the org](https://github.com/orgs/kubevirt/people) to skip this step.\n\nOnce the patch is verified, the new status will be reflected by the `ok-to-test` label.\n\nI understand the commands that are listed [here](https://go.k8s.io/bot-commands?repo=kubevirt%2Fkubevirt).\n\n
\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n
\n\n@rmohr @slintes could you please help to review this?\n@yu2003w I will definitely have a look. We are on kubecon this week though. Can have a look on Monday. @davidvossel do you have some bandwidth?\nRebuilding the k8s clusters in https://github.com/kubevirt/kubevirtci/pull/81, will add the shasums of the result once they are done with building.\n@yu2003w could you apply this patch?", + "These clusters have PSP enabled. We can then whitelist your PR for tests.\n/ok-to-test\n/retest\nWe may see the following issues here:\r\n * network provider in k8s multus lane does not come up, I guess it does not support PSP\r\n * genie lane is stuck on starting at line `wait for genie to inject its configuration to /etc/cni/net.d/`, I guess PSP again\r\n * openshift providers contain the usual flakes (look good)\r\n * let's see what the plain k8s lanes will say.\r\n\r\n@cynepco3hahue would be great if you can help out a little bit on that PR, I am on PTO till Monday.\nI still can see this errors" + ] + } + }, + "metadata": { + "tags": [ + "kubevirt", + "incubating", + "app-definition", + "size-xxl", + "release-note-none", + "kind-api-change", + "ok-to-test" + ], + "category": "workloads", + "cncfProjects": [ + "kubevirt" + ], + "targetResourceKinds": [ + "Pod", + "Role" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubevirt/kubevirt/pull/2298", + "sourceRepo": "kubevirt/kubevirt", + "reactions": 3, + "comments": 17 + }, + "security": { + "scannedAt": "2026-02-27T17:45:09.655Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubevirt/kubevirt-2597-gpu-and-vgpu-support-for-kubevirt-vms.json b/solutions/cncf-generated/kubevirt/kubevirt-2597-gpu-and-vgpu-support-for-kubevirt-vms.json new file mode 100644 index 00000000..62876b16 --- /dev/null +++ b/solutions/cncf-generated/kubevirt/kubevirt-2597-gpu-and-vgpu-support-for-kubevirt-vms.json @@ -0,0 +1,49 @@ +{ + "format": 
"kc-mission-v1", + "exportedAt": "2026-02-27T17:45:00.422Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubevirt: GPU and vGPU support for Kubevirt VMs", + "description": "**What this PR does / why we need it**:\nThese are changes to pull pci ids or mdev uuid from the environment variable set by device plugin and create appropriate hostdev devices in libvirt VM xml.\n\n**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\nFixes #2585\n\n**Special notes for your reviewer**:\n\n**Release note**:\n\n```release-note\nProvides ability to request Nvidia GPU and vGPU exposed by Nvidia Kubevirt Device Plugin.\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "What about the \"devices.kubernetes.io\" or \"devices.kubevirt.io\" vendor name?\n\nIn the email thread it was discussed to define an interface (env vars and\nbehavior) that DPs will need to implement in order to work with the device\npassthrough impl in kubevirt.\nAnd we'd expect that devices requested with \"devices.kubevirt.io\" (or \"\ngpu.devices.kubevirt.io\") will implement the defined interface.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kubevirt", + "incubating", + "app-definition", + "size-xl", + "release-note", + "kind-api-change", + "lgtm", + "approved", + "ok-to-test", + "dco-signoff--yes" + ], + "category": "workloads", + "cncfProjects": [ + "kubevirt" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/kubevirt/kubevirt/pull/2597", + "sourceRepo": "kubevirt/kubevirt", + "reactions": 5, + "comments": 43 + }, + "security": { + "scannedAt": "2026-02-27T17:45:00.422Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git 
a/solutions/cncf-generated/kubevirt/kubevirt-3613-add-kernel-boot-feature-to-boot-vms.json b/solutions/cncf-generated/kubevirt/kubevirt-3613-add-kernel-boot-feature-to-boot-vms.json new file mode 100644 index 00000000..36119364 --- /dev/null +++ b/solutions/cncf-generated/kubevirt/kubevirt-3613-add-kernel-boot-feature-to-boot-vms.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:10.727Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubevirt: Add Kernel Boot feature to boot VMs", + "description": "This is a draft PR.\n**What this PR does / why we need it**:\nThis PR enables the VM to boot directly from a kernel and initrd stored in the host OS, allowing command line arguments to be passed directly to the installer.\n\n**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\nFixes #2741 \n\n**Special notes for your reviewer**:\nThis is a draft PR. 
\nProposed design document can be found [here ](https://docs.google.com/document/d/1RHIh8l906WIno-zhhpzgGhL-795uZUr_YM6_noLEtvw/edit#)\n\n**Release note**:\n```release-note\nAllow users to boot a VM directly from kernel, initrd and command line arguments.\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "There are still changes that need to be done as per [1]\n@hritvi are you still planning to work on this or would you prefer someone else to complete this work?\n\n[1] https://github.com/kubevirt/kubevirt/pull/3613#pullrequestreview-470359733", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kubevirt", + "incubating", + "app-definition", + "size-l", + "release-note", + "do-not-merge-hold", + "lifecycle-rotten", + "needs-rebase", + "kind-api-change", + "do-not-merge-work-in-progress", + "dco-signoff--no" + ], + "category": "workloads", + "cncfProjects": [ + "kubevirt" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubevirt/kubevirt/pull/3613", + "sourceRepo": "kubevirt/kubevirt", + "reactions": 3, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:45:10.727Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubevirt/kubevirt-732-manifests-adding-scc-manifests.json b/solutions/cncf-generated/kubevirt/kubevirt-732-manifests-adding-scc-manifests.json new file mode 100644 index 00000000..50a0b3f4 --- /dev/null +++ b/solutions/cncf-generated/kubevirt/kubevirt-732-manifests-adding-scc-manifests.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:20.938Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubevirt: manifests: Adding SCC manifests", + "description": "- The SCC manifests are required 
for the deployment of Kubevirt on\n Openshift.\n\n- Currently, the granted privileges are the same as in the \"privileged\"\n SCC. Once we stabilize the functional tests on Openshift, we can start to drop\n unneeded privileges from the SCC.\n\ncloses https://github.com/kubevirt/kubevirt/issues/573\n\nSigned-off-by: gbenhaim ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Could you move the manifest files into the existing sub directories? i.e. `testing/openshift-scc.yaml` …", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "kubectl apply -f kubevirt.yaml -f with-openshift.yaml", + "cluster/k8s-1.9.3/.kubectl create -f /var/lib/jenkins/workspace/kubevirt-functional-tests-vagrant-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/for-openshift.yaml\r\nerror: unable to recognize \"/var/lib/jenkins/workspace/kubevirt-functional-tests-vagrant-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/for-openshift.yaml\": no matches for /, Kind=SecurityContextConstraints\r\nmake: *** [cluster-deploy] Error 1" + ] + } + }, + "metadata": { + "tags": [ + "kubevirt", + "incubating", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "kubevirt" + ], + "targetResourceKinds": [ + "Deployment" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubevirt/kubevirt/pull/732", + "sourceRepo": "kubevirt/kubevirt", + "reactions": 2, + "comments": 21 + }, + "security": { + "scannedAt": "2026-02-27T17:45:20.938Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubevirt/kubevirt-7451-virt-launcher-reduce-memory-consumption.json b/solutions/cncf-generated/kubevirt/kubevirt-7451-virt-launcher-reduce-memory-consumption.json new file mode 100644 index 00000000..ab713ef6 --- /dev/null +++ 
b/solutions/cncf-generated/kubevirt/kubevirt-7451-virt-launcher-reduce-memory-consumption.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:17.388Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubevirt: [virt-launcher] Reduce memory consumption", + "description": "**What this PR does / why we need it**:\nCurrently, `virt-launcher` starts as a monitoring process which then creates the actual `virt-launcher` process. The monitoring process watches the subprocess and waits for its signals.\nThe monitoring process has the same code and dependencies as the virt-launcher although it doesn't need all of them. This results in high memory usage for a simple process that just waits for something to happen.\nAll of this happens for each VMI in the cluster.\nInstead of fork itself, creating a `virt-launcher-monitor` process that acts as an entry point and starts the real `virt-launcher` process passing to it all the arguments can reduce the amount of memory used.\n\nHere's the result of the executed tests:\n![results](https://user-images.githubusercontent.com/4507192/160111860-d27e374b-6a8f-4010-826d-1b04a1527ab8.png)\n\nIt is about 50M per VM\n\n**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close th", + "type": "analyze", + "status": "completed", + "resolution": { + "summary": "Hi, this is very cool PR (I'm new here and just going over the code, issues, and PRs). I wanted to ask if `virt-launcher-monitor` shouldn't be documented somewhere? (e.g. 
in a dedicated `README.md` like [this](https://github.com/kubevirt/kubevirt/tree/main/cmd/virt-handler#readme) or maybe [here](https://github.com/kubevirt/kubevirt/blob/main/docs/components.md#virt-launcher))", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kubevirt", + "incubating", + "app-definition", + "size-xl", + "lgtm", + "approved", + "kind-build-change", + "dco-signoff--yes" + ], + "category": "workloads", + "cncfProjects": [ + "kubevirt" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubevirt/kubevirt/pull/7451", + "sourceRepo": "kubevirt/kubevirt", + "reactions": 3, + "comments": 24 + }, + "security": { + "scannedAt": "2026-02-27T17:45:17.388Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubevirt/kubevirt-7879-sig-scale-increase-virt-controller-qps-and-burst-default-configura.json b/solutions/cncf-generated/kubevirt/kubevirt-7879-sig-scale-increase-virt-controller-qps-and-burst-default-configura.json new file mode 100644 index 00000000..e76e345d --- /dev/null +++ b/solutions/cncf-generated/kubevirt/kubevirt-7879-sig-scale-increase-virt-controller-qps-and-burst-default-configura.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:11.816Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubevirt: [sig-scale] Increase virt-controller QPS and Burst default configuration", + "description": "**What this PR does / why we need it**:\n\nWith the current default rate limiting configuration for virt-controller, creating 1000 VMs in a 12-node cluster can take up to 23 minutes in total, which is very slow.More info [here](https://docs.google.com/document/d/1kJGXIClq0hOKAgu29LsO--Xc58WYvn_L/edit#heading=h.gjdgxs).\n\n\"image\"\n\nConsider the 
figure above, where I increased the virt-controller's QPS/Burst from the original value of 20/30 to 100/220, 200/400 and 400/600, with a total of 4 scenarios.\nNote that while pod creation takes up to 25 seconds, the VM creation time with the original rate limiting configuration can take up to 22.7 minutes. Which is not acceptable. After increasing virt-controller's QPS/Burst to 400/600, the maximum VM creation latency was 25s.\n\n\"image\"\n\n**What this PR does / why we need it**:\n\n**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\nFixes #7738\n\n**Special notes for your reviewer**:\n\n**Release note**:\n\n```release-note\nNONE\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/retest", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Waiting for it to complete...\r\nAnother command (pid=16) is running. 
Waiting for it to complete on the server (server_pid=20)...\r\nLoading: 0 packages loaded\r\n Fetching com_github_bazelbuild_buildtools; fetching\r\nmake: *** [Makefile:187: format] Error 1" + ] + } + }, + "metadata": { + "tags": [ + "kubevirt", + "incubating", + "app-definition", + "size-s", + "release-note-none", + "lgtm", + "approved", + "ok-to-test", + "dco-signoff--yes" + ], + "category": "workloads", + "cncfProjects": [ + "kubevirt" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/kubevirt/kubevirt/pull/8028", + "sourceRepo": "kubevirt/kubevirt", + "reactions": 3, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:45:12.808Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubevirt/kubevirt-9789-builder-create-kubevirt-source-directory-and-mark-it-as-safe-for-g.json b/solutions/cncf-generated/kubevirt/kubevirt-9789-builder-create-kubevirt-source-directory-and-mark-it-as-safe-for-g.json new file mode 100644 index 00000000..cd3e136e --- /dev/null +++ b/solutions/cncf-generated/kubevirt/kubevirt-9789-builder-create-kubevirt-source-directory-and-mark-it-as-safe-for-g.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:19.571Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubevirt: builder: Create KubeVirt source directory and mark it as safe for git", + "description": "**What this PR does / why we need it**:\n\nResolves the following error when using builder >= 2304121609-faf545b82 that now provides git-2.39.1-1.el9.x86_64 (previously git-2.31.1-2.el9.2.x86_64 with 2212180911-8818abcfa):\n\n```\n$ make generate\n[..]\n+ /root/go/src/kubevirt.io/kubevirt/hack/build-go.sh generate\nfatal: detected dubious ownership in repository at '/root/go/src/kubevirt.io/kubevirt'\nTo add an exception for this directory, call:\n\n git config 
--global --add safe.directory /root/go/src/kubevirt.io/kubevirt\n[..]\n+ go build\nerror obtaining VCS status: exit status 128\n Use -buildvcs=false to disable VCS stamping.\nmake: *** [Makefile:48: generate] Error 1\n```\n\nThis could be handled in the individual build and generate scripts but would result in the same directory being appended to the global git config with each run. Adding it once during the build of the builder image is cleaner.\n\n**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes # /lgtm\r\n> \r\n> not on a machine right now where I could build and push the image for you. Or do we have the CI jobs now in place to do it async?\r\n\r\nThe postsubmit job `publish-kubevirt-builder` [1] is working now so that should happen async thanks!\r\n\r\nhttps://prow.ci.kubevirt.io/job-history/gs/kubevirt-prow/logs/publish-kubevirt-builder\n/approve\n[APPROVALNOTIFIER] This PR is **APPROVED**\n\nThis pull-request has been approved by: *rmohr*\n\nThe full list of commands accepted by this bot can be found [here](https://go.k8s.io/bot-commands?repo=kubevirt%2Fkubevirt).\n\nThe pull request process is described [here](https://git.k8s.io/community/contributors/guide/owners.md#the-code-review-process)\n\n
\nNeeds approval from an approver in each of these files:\n\n- ~~[OWNERS](https://github.com/kubevirt/kubevirt/blob/main/OWNERS)~~ [rmohr]\n\nApprovers can indicate their approval by writing `/approve` in a comment\nApprovers can cancel approval by writing `/approve cancel` in a comment\n
\n\n@lyarwood Thank you! You're fast :)\n/retest-required\nThis bot automatically retries required jobs that failed/flaked on approved PRs.\nSilence the bot with an `/lgtm cancel` or `/hold` comment for consistent failures.\n/retest-required\nThis bot automatically retries required jobs that failed/flaked on approved PRs.\nSilence the bot with an `/lgtm cancel` or `/hold` comment for consistent failures.\n/retest-required\nThis bot automatically retries required jobs that failed/flaked on approved PRs.\nSilence the bot with an `/lgtm cancel` or `/hold` comment for consistent failures.\n/retest-required\nThis bot automatically retries required jobs that failed/flaked on approved PRs.\nSilence the bot with an `/lgtm cancel` or `/hold` comment for consistent failures.\n/retest-required\nThis bot automatically retries required jobs that failed/flaked on approved PRs.\nSilence the bot with an `/lgtm cancel` or `/hold` comment for consistent failures.\nNot sure that it's related but there is still issue with the builder:", + "> Not sure that it's related but there is still issue with the builder:\n>" + ] + } + }, + "metadata": { + "tags": [ + "kubevirt", + "incubating", + "app-definition", + "size-xs", + "release-note-none", + "lgtm", + "approved", + "dco-signoff--yes" + ], + "category": "workloads", + "cncfProjects": [ + "kubevirt" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubevirt/kubevirt/pull/9789", + "sourceRepo": "kubevirt/kubevirt", + "reactions": 2, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:45:19.571Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubevirt/kubevirt-9971-push-multiarch-manifest-for-utility-images.json b/solutions/cncf-generated/kubevirt/kubevirt-9971-push-multiarch-manifest-for-utility-images.json new file mode 100644 index 00000000..e08cb799 --- /dev/null +++ 
b/solutions/cncf-generated/kubevirt/kubevirt-9971-push-multiarch-manifest-for-utility-images.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:08.376Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kubevirt: push multiarch manifest for utility images", + "description": "**What this PR does / why we need it**:\nhttps://github.com/kubevirt/kubevirt/pull/8989#issuecomment-1599126575\n\n**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\nFixes #9964\n\n```release-note\nNONE\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "> > One question though, how much time it takes now to run the `make cluster-sync`? because I noticed that it take plenty of time, but maybe because I was testing multi arch i.e. `export BUILD_ARCH=aarch64,x86_64`\n> \n> I think `make cluster-sync` spend same time as before, as we only build host CPU arch images only when doing `make cluster-sync`. 
It is time consuming when doing cross build.\n\nYes, I think the dev flow is not touched by this, since `make bazel-push-images` is only used in the release script.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kubevirt", + "incubating", + "app-definition", + "size-l", + "release-note-none", + "lgtm", + "approved", + "dco-signoff--yes" + ], + "category": "workloads", + "cncfProjects": [ + "kubevirt" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubevirt/kubevirt/pull/9971", + "sourceRepo": "kubevirt/kubevirt", + "reactions": 3, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:45:08.376Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kuma/kuma-563-fix-xds-server-log-network-interface.json b/solutions/cncf-generated/kuma/kuma-563-fix-xds-server-log-network-interface.json new file mode 100644 index 00000000..e0a7b9a9 --- /dev/null +++ b/solutions/cncf-generated/kuma/kuma-563-fix-xds-server-log-network-interface.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:43.967Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kuma: fix(xds-server) log network interface", + "description": "* Added logging of interface\n\nFix #408\n\n### Added Logging of Interface of each server kuma-cp starts\n\n### Issues resolved\n\nFix #408", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "[![CLA assistant check](https://cla-assistant.io/pull/badge/signed)](https://cla-assistant.io/Kong/kuma?pullRequest=563)
All committers have signed the CLA.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "2020-02-04T13:40:45.818+0100\tINFO\tadmin-server\tstarting server\t{\"address\": \"127.0.0.1:5679\"}\r\n2020-02-04T13:40:45.818+0100\tINFO\tgui-server\tstarting\t{\"address\": \":5683\"}\r\n2020-02-04T13:40:45.818+0100\tINFO\tapi-server\tstarting\t{\"address\": \":5681\"}\r\n2020-02-04T13:40:45.818+0100\tINFO\tbootstrap-server\tstarting\t{\"address\": \":5682\"}\r\n2020-02-04T13:40:45.818+0100\tINFO\txds-server.diagnostics\tstarting\t{\"address\": \":5680\"}\r\n2020-02-04T13:40:45.819+0100\tINFO\tmads-server.grpc\tstarting\t{\"address\": \"[::]:5676\"}\r\n2020-02-04T13:40:45.819+0100\tINFO\txds-server.grpc\tstarting\t{\"address\": \"[::]:5678\"}\r\n2020-02-04T13:40:45.819+0100\tINFO\tsds-server.grpc\tstarting\t{\"address\": \"[::]:5677\", \"tls\": true}", + "2020-02-04T13:40:45.818+0100\tINFO\tadmin-server\tstarting server\t{\"address\": \"127.0.0.1:5679\"}\r\n2020-02-04T13:40:45.818+0100\tINFO\tgui-server\tstarting\t{\"address\": \"0.0.0.0:5683\"}\r\n2020-02-04T13:40:45.818+0100\tINFO\tapi-server\tstarting\t{\"address\": \"0.0.0.0:5681\"}\r\n2020-02-04T13:40:45.818+0100\tINFO\tbootstrap-server\tstarting\t{\"address\": \"0.0.0.0:5682\"}\r\n2020-02-04T13:40:45.818+0100\tINFO\txds-server.diagnostics\tstarting\t{\"address\": \"0.0.0.0:5680\"}\r\n2020-02-04T13:40:45.819+0100\tINFO\tmads-server.grpc\tstarting\t{\"address\": \"0.0.0.0:5676\"}\r\n2020-02-04T13:40:45.819+0100\tINFO\txds-server.grpc\tstarting\t{\"address\": \"0.0.0.0:5678\"}\r\n2020-02-04T13:40:45.819+0100\tINFO\tsds-server.grpc\tstarting\t{\"address\": \"0.0.0.0:5677\", \"tls\": true}", + "2020-02-04T13:40:45.818+0100\tINFO\tadmin-server\tstarting server\t{\"interface\": \"127.0.0.1\", \"port\": 5679}\r\n2020-02-04T13:40:45.818+0100\tINFO\tgui-server\tstarting\t{\"interface\": \"0.0.0.0\", \"port\": 
5683}\r\n2020-02-04T13:40:45.818+0100\tINFO\tapi-server\tstarting\t{\"interface\": \"0.0.0.0\", \"port\": 5681}\r\n2020-02-04T13:40:45.818+0100\tINFO\tbootstrap-server\tstarting\t{\"interface\": \"0.0.0.0\", \"port\": 5682}\r\n2020-02-04T13:40:45.818+0100\tINFO\txds-server.diagnostics\tstarting\t{\"interface\": \"0.0.0.0\", \"port\": 5680}\r\n2020-02-04T13:40:45.819+0100\tINFO\tmads-server.grpc\tstarting\t{\"interface\": \"0.0.0.0\", \"port\": 5676}\r\n2020-02-04T13:40:45.819+0100\tINFO\txds-server.grpc\tstarting\t{\"interface\": \"0.0.0.0\", \"port\": 5678}\r\n2020-02-04T13:40:45.819+0100\tINFO\tsds-server.grpc\tstarting\t{\"interface\": \"0.0.0.0\", \"port\": 5677, \"tls\": true}" + ] + } + }, + "metadata": { + "tags": [ + "kuma", + "sandbox", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "kuma" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kumahq/kuma/pull/563", + "sourceRepo": "kumahq/kuma", + "reactions": 1, + "comments": 16 + }, + "security": { + "scannedAt": "2026-02-27T17:48:43.967Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kured/kured-213-replaced-annotationttl-with-lockttl-and-made-it-work-correctly.json b/solutions/cncf-generated/kured/kured-213-replaced-annotationttl-with-lockttl-and-made-it-work-correctly.json new file mode 100644 index 00000000..3948d87a --- /dev/null +++ b/solutions/cncf-generated/kured/kured-213-replaced-annotationttl-with-lockttl-and-made-it-work-correctly.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:54.459Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kured: Replaced --annotationTTL with --lockTTL and made it work correctly", + "description": "This is a follow-up on #54, #119 & #143\n\n- I renamed the --annotationTTL flag into --lockTTL as I believe it is a more 
pertinent nomenclature\n- I fixed its implementation as reported not being functional in #143", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Tested with `minikube`, k8s versions 1.17.14, 1.18.12, 1.19.4.\n\nCommits since 1.5.0 by bboreham, dholbach, evrardjp, mvisonneau, smueller18:\n\n# Breaking changes\n`--annotationTTL` was renamed to `--lockTTL` (Helm chart values accordingly).\n\n# Bug fixes\n- #213 and #238: Replaced --annotationTTL with --lockTTL and made it work correctly\n- #237: Drain: allow pods grace period to terminate\n- #166: Remove quote for parameter alert-filter-regexp\n\n# Clean Up\n- #217: Clean up deps, update docs to explain state post-210\n- #210: feature: Remove kubectl bin\n- #198: fix: Follow DKL-DI-0004 guideline\n- #194: add missing quote - thanks Karan Arora for reporting\n\n# Release automation\n- #185: Release helper\n- #221: Add Lint job in github actions\n- #222: Make go lint on pkg folder happier \n- #215: Make go lint on cmd folder happier", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kured", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "kured" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/kubereboot/kured/pull/213", + "sourceRepo": "kubereboot/kured", + "reactions": 1, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:48:54.459Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kured/kured-496-added-support-for-multi-arch-image-build.json b/solutions/cncf-generated/kured/kured-496-added-support-for-multi-arch-image-build.json new file mode 100644 index 00000000..96bb4551 --- /dev/null +++ b/solutions/cncf-generated/kured/kured-496-added-support-for-multi-arch-image-build.json @@ -0,0 +1,44 @@ +{ + "format": 
"kc-mission-v1", + "exportedAt": "2026-02-27T17:48:46.914Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kured: Added support for multi-arch image build", + "description": "Modified workflow actions to use QEMU and docker buildx to create multi-arch image and manifest in docker hub and ghcr.\nRequired a multi-arch compatible Dockerfile as well.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This is a WIP-PR of a build with ARM support.\nI'm not really sure about the changes in the Makefile. With the changes for windows-support in mind from #460 this would create more and more complicated tooling-scripts to maintain.\n\nMy personal favourite is to do only the go-compilation and other small tasks in the makefile and move the image build and -publish to Github Actions which are already able to create multiarch-builds out of the box. But this would remove the ability to create images locally (without writing the commands on your own).\n\nI also would suggest to not separate different architectures by image-tag and use the same tags for different arches.\n\nWDYT?\n\nRef: #23", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "kured", + "sandbox", + "app-definition", + "enhancement", + "build" + ], + "category": "workloads", + "cncfProjects": [ + "kured" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubereboot/kured/pull/496", + "sourceRepo": "kubereboot/kured", + "reactions": 7, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:48:46.914Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kured/kured-56-build-arm64.json b/solutions/cncf-generated/kured/kured-56-build-arm64.json new file mode 100644 index 00000000..c61527b0 --- 
/dev/null +++ b/solutions/cncf-generated/kured/kured-56-build-arm64.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:48.071Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kured: Build arm64", + "description": "* Adds support for building and pushing arm64 Docker images\n* Updates kubectl to v1.13.1\n\nSince Quay does not currently support \"fat manifests\", the images are tagged the same way with the addition of the `-arm64` suffix for the new architecture.\n\nTested on my Kubernetes 1.13.1 Pine64 cluster.\n\n```\ntime=\"2018-12-27T22:48:54Z\" level=info msg=\"Kubernetes Reboot Daemon: build-arm64-114c349\"\ntime=\"2018-12-27T22:48:54Z\" level=info msg=\"Node ID: kube-node-1\"\ntime=\"2018-12-27T22:48:54Z\" level=info msg=\"Lock Annotation: kube-system/kured:weave.works/kured-node-lock\"\ntime=\"2018-12-27T22:48:54Z\" level=info msg=\"Reboot Sentinel: /var/run/reboot-required every 1h0m0s\"\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Thanks for doing this, @jmreicha !\n\nI must say, I have a personal dislike for repeating sections of code where the only difference is a name (in this case the architecture). 
Did you consider eliding these and making the architecture a variable?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "time=\"2018-12-27T22:48:54Z\" level=info msg=\"Kubernetes Reboot Daemon: build-arm64-114c349\"\r\ntime=\"2018-12-27T22:48:54Z\" level=info msg=\"Node ID: kube-node-1\"\r\ntime=\"2018-12-27T22:48:54Z\" level=info msg=\"Lock Annotation: kube-system/kured:weave.works/kured-node-lock\"\r\ntime=\"2018-12-27T22:48:54Z\" level=info msg=\"Reboot Sentinel: /var/run/reboot-required every 1h0m0s\"" + ] + } + }, + "metadata": { + "tags": [ + "kured", + "sandbox", + "app-definition", + "no-pr-activity", + "build" + ], + "category": "workloads", + "cncfProjects": [ + "kured" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubereboot/kured/pull/56", + "sourceRepo": "kubereboot/kured", + "reactions": 5, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:48:48.071Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kured/kured-660-add-multiple-concurrent-node-reboot-feature.json b/solutions/cncf-generated/kured/kured-660-add-multiple-concurrent-node-reboot-feature.json new file mode 100644 index 00000000..70cf9d8b --- /dev/null +++ b/solutions/cncf-generated/kured/kured-660-add-multiple-concurrent-node-reboot-feature.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:50.581Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kured: Add multiple concurrent node reboot feature", + "description": "Currently in kured a single node can get a lock with Acquire. There could be situations where multiple nodes might want a lock in the event that a cluster can handle multiple nodes being rebooted. 
This adds the side-by-side implementation for a multiple node lock situation.\n\n### Testing done\n\nAdded unit tests. Also ran a manual test with `--concurrency=2`. I observed that two nodes rebooted at the same time:\n\n```\n$ kubectl get no\nNAME STATUS ROLES AGE VERSION\naks-nodepool1-14327021-vmss000000 Ready agent 24m v1.23.8\naks-nodepool1-14327021-vmss000001 Ready agent 24m v1.23.8\naks-nodepool1-14327021-vmss000002 Ready agent 24m v1.23.8\naks-nodepool1-14327021-vmss000003 Ready agent 24m v1.23.8\naks-nodepool1-14327021-vmss000004 NotReady,SchedulingDisabled agent 24m v1.23.8\naks-nodepool1-14327021-vmss000005 Re", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "My statement was a bit inaccurate: I mean that increasing concurrency is unsafe if the cluster uses topologies.\n\nYes, I can restart the tests. This one is a flaky failure, but typically only for one of the jobs.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "$ kubectl get no\r\nNAME STATUS ROLES AGE VERSION\r\naks-nodepool1-14327021-vmss000000 Ready agent 24m v1.23.8\r\naks-nodepool1-14327021-vmss000001 Ready agent 24m v1.23.8\r\naks-nodepool1-14327021-vmss000002 Ready agent 24m v1.23.8\r\naks-nodepool1-14327021-vmss000003 Ready agent 24m v1.23.8\r\naks-nodepool1-14327021-vmss000004 NotReady,SchedulingDisabled agent 24m v1.23.8\r\naks-nodepool1-14327021-vmss000005 Ready agent 24m v1.23.8\r\naks-nodepool1-14327021-vmss000006 Ready agent 24m v1.23.8\r\naks-nodepool1-14327021-vmss000007 Ready,SchedulingDisabled agent 24m v1.23.8", + "\"weave.works/kured-node-lock\": 
\"{\\\"maxOwners\\\":2,\\\"locks\\\":[{\\\"nodeID\\\":\\\"aks-nodepool1-14327021-vmss000007\\\",\\\"metadata\\\":{\\\"unschedulable\\\":false},\\\"created\\\":\\\"2022-09-23T21:06:30.814409507Z\\\",\\\"TTL\\\":0},{\\\"nodeID\\\":\\\"aks-nodepool1-14327021-vmss000004\\\",\\\"metadata\\\":{\\\"unschedulable\\\":false},\\\"created\\\":\\\"2022-09-23T21:06:57.626718467Z\\\",\\\"TTL\\\":0}]}\"", + "There are 5 nodes in the cluster\r\n0 nodes were removed from pool once:\r\n0 nodes removed from the pool are now back:\r\nResult of command kubectl get nodes ... showing unschedulable nodes:\r\nchart-testing-control-plane \r\nchart-testing-control-plane2 \r\nchart-testing-control-plane3 \r\nchart-testing-worker \r\nchart-testing-worker2 \r\nAttempt 1 failed! Trying again in 60 seconds...\r\n0 nodes were removed from pool once:\r\n0 nodes removed from the pool are now back:\r\nResult of command kubectl get nodes ... showing unschedulable nodes:\r\nchart-testing-control-plane true\r\nchart-testing-control-plane2 true\r\nchart-testing-control-plane3 true\r\nchart-testing-worker true\r\nchart-testing-worker2 true\r\nchart-testing-control-plane is now unschedulable!\r\nchart-testing-control-plane2 is now unschedulable!\r\nchart-testing-control-plane3 is now unschedulable!\r\nchart-testing-worker is now unschedulable!\r\nchart-testing-worker2 is now unschedulable!\r\nAttempt 2 failed! Trying again in 60 seconds..." 
+ ] + } + }, + "metadata": { + "tags": [ + "kured", + "sandbox", + "app-definition", + "enhancement", + "keep" + ], + "category": "workloads", + "cncfProjects": [ + "kured" + ], + "targetResourceKinds": [ + "Role", + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubereboot/kured/pull/660", + "sourceRepo": "kubereboot/kured", + "reactions": 2, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:48:50.581Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kured/kured-715-feat-add-reboot-required-annotation.json b/solutions/cncf-generated/kured/kured-715-feat-add-reboot-required-annotation.json new file mode 100644 index 00000000..0c637127 --- /dev/null +++ b/solutions/cncf-generated/kured/kured-715-feat-add-reboot-required-annotation.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:51.612Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kured: feat: Add `reboot-required` annotation", + "description": "#702", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@timo-42 I have not forgotten, but I am a bit busy at the moment.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "k logs kured-knvc5 -f\r\ntime=\"2023-02-07T09:45:58Z\" level=info msg=\"Binding node-id command flag to environment variable: KURED_NODE_ID\"\r\ntime=\"2023-02-07T09:45:58Z\" level=info msg=\"Kubernetes Reboot Daemon: c7b7d6a\"\r\ntime=\"2023-02-07T09:45:58Z\" level=info msg=\"Node ID: node1\"\r\ntime=\"2023-02-07T09:45:58Z\" level=info msg=\"Lock Annotation: kube-system/kured:weave.works/kured-node-lock\"\r\ntime=\"2023-02-07T09:45:58Z\" level=info msg=\"Lock TTL not set, lock will remain until being released\"\r\ntime=\"2023-02-07T09:45:58Z\" level=info 
msg=\"Lock release delay set, lock release will be delayed by: 30m0s\"\r\ntime=\"2023-02-07T09:45:58Z\" level=info msg=\"PreferNoSchedule taint: \"\r\ntime=\"2023-02-07T09:45:58Z\" level=info msg=\"Blocking Pod Selectors: []\"\r\ntime=\"2023-02-07T09:45:58Z\" level=info msg=\"Reboot schedule: ---MonTueWedThuFri--- between 02:30 and 06:00 Europe/Berlin\"\r\ntime=\"2023-02-07T09:45:58Z\" level=info msg=\"Reboot check command: [test -f /var/run/reboot-required] every 1m0s\"\r\ntime=\"2023-02-07T09:45:58Z\" level=info msg=\"Reboot command: [/bin/systemctl reboot]\"\r\ntime=\"2023-02-07T09:45:58Z\" level=info msg=\"Will annotate nodes during kured reboot operations\"\r\n\r\nk exec -it kured-knvc5 -- sh\r\n/ # wget -qO- 127.0.0.1:8080/metrics | grep kured\r\n# HELP kured_reboot_required OS requires reboot due to software updates.\r\n# TYPE kured_reboot_required gauge\r\nkured_reboot_required{node=\"node1\"} 1\r\n\r\nk describe node node1 | grep weave.works\r\n", + "k exec -it kured-pxkjr -- sh\r\n/ # wget -qO- 127.0.0.1:8080/metrics | grep kured\r\n# HELP kured_reboot_required OS requires reboot due to software updates.\r\n# TYPE kured_reboot_required gauge\r\nkured_reboot_required{node=\"node1\"} 1\r\n\r\nk logs kured-pxkjr -f\r\ntime=\"2023-02-07T10:20:21Z\" level=info msg=\"Binding node-id command flag to environment variable: KURED_NODE_ID\"\r\ntime=\"2023-02-07T10:20:21Z\" level=info msg=\"Kubernetes Reboot Daemon: c7b7d6a\"\r\ntime=\"2023-02-07T10:20:21Z\" level=info msg=\"Node ID: node1\"\r\ntime=\"2023-02-07T10:20:21Z\" level=info msg=\"Lock Annotation: kube-system/kured:weave.works/kured-node-lock\"\r\ntime=\"2023-02-07T10:20:21Z\" level=info msg=\"Lock TTL not set, lock will remain until being released\"\r\ntime=\"2023-02-07T10:20:21Z\" level=info msg=\"Lock release delay set, lock release will be delayed by: 30m0s\"\r\ntime=\"2023-02-07T10:20:21Z\" level=info msg=\"PreferNoSchedule taint: \"\r\ntime=\"2023-02-07T10:20:21Z\" level=info msg=\"Blocking Pod 
Selectors: []\"\r\ntime=\"2023-02-07T10:20:21Z\" level=info msg=\"Reboot schedule: ---MonTueWedThuFri--- between 10:00 and 13:00 Europe/Berlin\"\r\ntime=\"2023-02-07T10:20:21Z\" level=info msg=\"Reboot check command: [test -f /var/run/reboot-required] every 1m0s\"\r\ntime=\"2023-02-07T10:20:21Z\" level=info msg=\"Reboot command: [/bin/systemctl reboot]\"\r\ntime=\"2023-02-07T10:20:21Z\" level=info msg=\"Will annotate nodes during kured reboot operations\"\r\ntime=\"2023-02-07T10:22:56Z\" level=info msg=\"Reboot required\"\r\ntime=\"2023-02-07T10:22:56Z\" level=info msg=\"Adding node node1 annotation: weave.works/reboot-required=true\"\r\ntime=\"2023-02-07T10:22:56Z\" level=info msg=\"Adding node node1 annotation: weave.works/kured-reboot-in-progress=2023-02-07T10:22:56Z\"\r\ntime=\"2023-02-07T10:22:56Z\" level=info msg=\"Adding node node1 annotation: weave.works/kured-most-recent-reboot-needed=2023-02-07T10:22:56Z\"\r\ntime=\"2023-02-07T10:22:56Z\" level=info msg=\"Acquired reboot lock\"\r\ntime=\"2023-02-07T10:22:56Z\" level=info msg=\"Draining node node1\"\r\n...\r\ntime=\"2023-02-07T10:23:32Z\" level=info msg=\"Running command: [/usr/bin/nsenter -m/proc/1/ns/mnt -- /bin/systemctl reboot] for node: node1\"\r\ntime=\"2023-02-07T10:23:32Z\" level=info msg=\"Waiting for reboot\"\r\n\r\nk describe node node1 | grep weave.works\r\n weave.works/kured-most-recent-reboot-needed: 2023-02-07T10:22:56Z\r\n weave.works/kured-reboot-in-progress: 2023-02-07T10:22:56Z\r\n weave.works/reboot-required: true", + "k exec -it kured-pxkjr -- sh\r\n/ # wget -qO- 127.0.0.1:8080/metrics | grep kured\r\n# HELP kured_reboot_required OS requires reboot due to software updates.\r\n# TYPE kured_reboot_required gauge\r\nkured_reboot_required{node=\"node1\"} 0\r\n\r\nk logs kured-pxkjr -f\r\n...\r\ntime=\"2023-02-07T10:25:35Z\" level=info msg=\"Deleting node node1 annotation weave.works/reboot-required\"\r\ntime=\"2023-02-07T10:25:35Z\" level=info msg=\"Holding 
lock\"\r\ntime=\"2023-02-07T10:25:35Z\" level=info msg=\"Uncordoning node node1\"\r\ntime=\"2023-02-07T10:25:35Z\" level=info msg=\"Deleting node node1 annotation weave.works/kured-reboot-in-progress\"\r\ntime=\"2023-02-07T10:25:35Z\" level=info msg=\"Delaying lock release by 30m0s\"\r\n\r\nk describe node node1 | grep weave.works\r\n weave.works/kured-most-recent-reboot-needed: 2023-02-07T10:22:56Z" + ] + } + }, + "metadata": { + "tags": [ + "kured", + "sandbox", + "app-definition", + "enhancement" + ], + "category": "workloads", + "cncfProjects": [ + "kured" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubereboot/kured/pull/715", + "sourceRepo": "kubereboot/kured", + "reactions": 1, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:48:51.612Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kured/kured-813-sentinel-command-without-nsenter-by-default.json b/solutions/cncf-generated/kured/kured-813-sentinel-command-without-nsenter-by-default.json new file mode 100644 index 00000000..3edca6a9 --- /dev/null +++ b/solutions/cncf-generated/kured/kured-813-sentinel-command-without-nsenter-by-default.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:53.054Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kured: Sentinel-command without nsenter by default", + "description": "As long as there's no custom sentinel-command kured expects that the sentinel file (`/var/run/reboot-required` by default) to be mounted into the container. 
This would cause the sentinel-command to be executed without nsenter.\n\nclose #526", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Wait with merge until kubereboot/kured#813 is released.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "volumes:\r\n - hostPath:\r\n name: sentinel\r\n path: /opt/whatever/\r\n type: Directory\r\n read-only: true\r\nvolumeMounts:\r\n - name: sentinel\r\n mountPath: /kured\r\n\r\nconfiguration:\r\n rebootSentinel: /kured/reboot-required\r\n rebootSentinelCommand: \"\"" + ] + } + }, + "metadata": { + "tags": [ + "kured", + "sandbox", + "app-definition", + "enhancement", + "keep", + "security" + ], + "category": "workloads", + "cncfProjects": [ + "kured" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kubereboot/kured/pull/813", + "sourceRepo": "kubereboot/kured", + "reactions": 1, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:48:53.054Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kured/kured-814-add-signal-reboot.json b/solutions/cncf-generated/kured/kured-814-add-signal-reboot.json new file mode 100644 index 00000000..c78b6ccb --- /dev/null +++ b/solutions/cncf-generated/kured/kured-814-add-signal-reboot.json @@ -0,0 +1,53 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:49.534Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kured: Add signal-reboot", + "description": "Based on #813\nclose #416 \nclose #722\n\nThis PR adds a `--reboot-method` flag with \"command\" (default) and \"signal\" option. The \"command\" option uses the `--reboot-command` on the host with `nsenter` as before. 
The new \"signal\" mode uses a `SIGRTMIN+5` signal by default against PID 1 (systemd) to reboot the node. The signal can be changed via `--reboot-signal` flag.\n\nWith this, the kured pod runs without privileged mode.\n\nThis PR is published as docker-image (amd64 and arm64): `ghcr.io/ckotzbauer/kured:1.14.0-alpha.2`\nUsage (based on the latest helm-chart) - Helm values.yaml:\n\n```yaml\nimage:\n repository: ghcr.io/ckotzbauer/kured\n tag: 1.14.0-alpha.2\nupdateStrategy: RollingUpdate\nconfiguration:\n period: \"0h0m30s\"\n rebootDelay: 0h1m0s\n rebootSentinel: /sentinel/reboot-required\nextraArgs:\n reboot-method: signal\ncontainerSecurityContext:\n readOnlyRootFilesystem: true\n privileged: false\n allowPrivilegeEscalation: false\n capabilities:\n drop: [\"*\"]\n add: [\"CAP_KILL\"]\nvolumes:\n ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Based on #49\n\nDo not merge until https://github.com/kubereboot/kured/pull/814 is released.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "image:\r\n repository: ghcr.io/ckotzbauer/kured\r\n tag: 1.14.0-alpha.2\r\nupdateStrategy: RollingUpdate\r\nconfiguration:\r\n period: \"0h0m30s\"\r\n rebootDelay: 0h1m0s\r\n rebootSentinel: /sentinel/reboot-required\r\nextraArgs:\r\n reboot-method: signal\r\ncontainerSecurityContext:\r\n readOnlyRootFilesystem: true\r\n privileged: false\r\n allowPrivilegeEscalation: false\r\n capabilities:\r\n drop: [\"*\"]\r\n add: [\"CAP_KILL\"]\r\nvolumes:\r\n - name: sentinel\r\n hostPath:\r\n path: /var/run\r\n type: Directory\r\nvolumeMounts:\r\n - name: sentinel\r\n mountPath: /sentinel\r\n readOnly: true", + "$ helm -n kured install -f ~/kured/values.yml kured kubereboot/kured\r\n$ helm -n kured list\r\nNAME \tNAMESPACE\tREVISION\tUPDATED \tSTATUS \tCHART \tAPP VERSION\r\nkured\tkured \t1 \t2023-08-08 21:50:33.438477295 +0000 UTC\tdeployed\tkured-5.1.0\t1.13.2 \r\n$ 
kubectl -n kured exec -it kured-vs4xb -- /bin/sh\r\n/ # kill -s SIGRTMIN+5 1", + "Aug 08 14:52:08 kermes-dev-k8s-node-a03 systemd[1]: Received SIGRTMIN+6 from PID 22396 (sh)." + ] + } + }, + "metadata": { + "tags": [ + "kured", + "sandbox", + "app-definition", + "enhancement", + "keep", + "security" + ], + "category": "workloads", + "cncfProjects": [ + "kured" + ], + "targetResourceKinds": [ + "Pod", + "Node" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/kubereboot/kured/pull/814", + "sourceRepo": "kubereboot/kured", + "reactions": 4, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:48:49.534Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kusionstack/kusionstack-1235-feat-add-a-new-command-kusion-release-list.json b/solutions/cncf-generated/kusionstack/kusionstack-1235-feat-add-a-new-command-kusion-release-list.json new file mode 100644 index 00000000..3f56e36a --- /dev/null +++ b/solutions/cncf-generated/kusionstack/kusionstack-1235-feat-add-a-new-command-kusion-release-list.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:48:56.580Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kusionstack: feat: add a new command kusion release list", + "description": "Feat: Add a new command kusion release list to list all releases in one workspace\n\n#### What type of PR is this?\n\n#### What this PR does / why we need it:\n/kind feature\n\n#### Which issue(s) this PR fixes:\n\nUsage: Fixes #1093 Part-1.\nFeat: Add a new command kusion release list to list all releases in one workspace.\n\n#### Special notes for your reviewer:\nEverything works well /lgtm\nbefore only unlock command used to show, now list command can also be seen below:\n![Screenshot from 2024-07-29 14-31-59](https://github.com/user-attachments/assets/8d5dc48a-406f-4f08-bf74-ed0a2d292d3d)\n\nAll 
edge cases also covered if, user has no workspace, it throws error as seen in Picture.\n\n#### Does this PR introduce a user-facing change?\n\n```release-note\nIn this PR we fix the sub command issue: #1093 Part-1 \nAdd a new command kusion release list to list all releases in one workspace.\nwhen use kusion release,\nIt now shows list with unlock.\n```\n\n#### Additional documentation e.g., design docs, usage docs, ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@ekjotsinghmakhija Thanks for your contribution! Welcome to our community!", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "#### Additional documentation e.g., design docs, usage docs, etc.:\r\n\r\n", + "@SparkYuan Please review the PR.\r\n/lgtm\n\n@ekjotsinghmakhija Thanks for your contribution! Welcome to our community!\nI have removed test cases for now as I have fixed the files and due to test-cases having some runtime faults that can't be merged, i will check the test cases and fix it, currently the PR can be merged without any conflicts.\r\n@SparkYuan Now it can be merged.\r\nPlease review and merge\n## Pull Request Test Coverage Report for [Build 10301851199](https://coveralls.io/builds/69117096)\n\n\n### Details\n\n* **31** of **66** **(46.97%)** changed or added relevant lines in **2** files are covered.\n* **7** unchanged lines in **1** file lost coverage.\n* Overall coverage decreased (**-0.07%**) to **52.486%**\n\n---\n\n| Changes Missing Coverage | Covered Lines | Changed/Added Lines | % |\n| :-----|--------------|--------|---: |\n| [pkg/cmd/release/list.go](https://coveralls.io/builds/69117096/source?filename=pkg%2Fcmd%2Frelease%2Flist.go#L61) | 30 | 65 | 46.15%\n\n\n| Files with Coverage Reduction | New Missed Lines | % |\n| :-----|--------------|--: |\n| 
[pkg/engine/api/destroy.go](https://coveralls.io/builds/69117096/source?filename=pkg%2Fengine%2Fapi%2Fdestroy.go#L127) | 7 | 83.06% |\n\n\n| Totals | [![Coverage Status](https://coveralls.io/builds/69117096/badge)](https://coveralls.io/builds/69117096) |\n| :-- | --: |\n| Change from base [Build 10298336704](https://coveralls.io/builds/69112593): | -0.07% |\n| Covered Lines: | 8731 |\n| Relevant Lines: | 16635 |\n\n---\n##### 💛 - [Coveralls](https://coveralls.io)\n\n> I have removed test cases for now as I have fixed the files and due to test-cases having some runtime faults that can't be merged, i will check the test cases and fix it, currently the PR can be merged without any conflicts. @SparkYuan Now it can be merged. Please review and merge\r\n\r\n@ekjotsinghmakhija Sorry I can't merge your codes. All code changes must include test cases according to our community guidelines. You can fix all your test cases and submit them again. We are more than happy to review your codes once you have completed them.\n@SparkYuan Can you recheck everything works fine you I fixed the test cases also now it passes without breaking when I run test cases.\r\n\n@SparkYuan Can you fix the errors I have tried to do it from my side, I see that the commands now work, but test case have an issues, can you fix it. The command works correctly but In test cases I'm unable to fix it.\r\n\n> @SparkYuan Can you fix the errors I have tried to do it from my side, I see that the commands now work, but test case have an issues, can you fix it. The command works correctly but In test cases I'm unable to fix it.\r\n\r\nYou can find more details by clicking the [Details](https://github.com/KusionStack/kusion/actions/runs/10156694640/job/28104693315?pr=1235) link on the right. I found that there are some code-style errors and you can fix them by following hints shown in the link.\n@ekjotsinghmakhija Hi! Thanks a lot for your contribution! 
It is quite normal that the first submitted PR encounters these GitHub Actions errors. So please don't worry, I will help you fix them : ) \n@liu-hm19 Thank you for your support, I have written the code but it doesn't work properly also there are some issues in test cases that I'm unable to find. Can you go through my code and see where code has logic and implementation error.\n@ekjotsinghmakhija OK, no problem. I have contacted you on Slack, and you can check the message : ) \nThank you to @liu-hm19 and @SparkYuan for helping me implement the feature, Add a new command kusion release list to list all releases in one workspace\r\n\r\n\n@SparkYuan Please review the PR. Golang Lint is failing not an issue, as the function has to be used.\n@ekjotsinghmakhija Hi, thanks for the updated PR! It seems that the `revisions` and `releases` fields in the struct of `fakeWorkspaceStorage` are indeed not being used. The ones actually used are in `fakeStorageForList` 😂 You can just delete these two fields of `fakeWorkspaceStorage`. \r\n\r\nBesides, it seems that you have forgotten to add `cmd.AddCommand(NewCmdList(streams))` in the `NewCmdRel` function of `pkg/cmd/release/release.go`. Could you please update and resubmit the PR again? \n@liu-hm19 Don't know why this time Golang Lint failed :laughing: It has a problem with me I guess.\n@ekjotsinghmakhija Hi, it seems that the line break of the updated file doesn't comply with the `gofumpt`-ed style. 
You can change the line 58 and line 59 of `pkg/cmd/release/list_test.go` to a single line as the following: \r\n\r\nbefore:" + ] + } + }, + "metadata": { + "tags": [ + "kusionstack", + "sandbox", + "app-definition", + "help-wanted", + "kind-feature" + ], + "category": "workloads", + "cncfProjects": [ + "kusionstack" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/KusionStack/kusion/pull/1235", + "sourceRepo": "KusionStack/kusion", + "reactions": 3, + "comments": 17 + }, + "security": { + "scannedAt": "2026-02-27T17:48:56.580Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kyverno/kyverno-12117-fix-tests-result-expected-should-fail-a-test.json b/solutions/cncf-generated/kyverno/kyverno-12117-fix-tests-result-expected-should-fail-a-test.json new file mode 100644 index 00000000..c1a0be66 --- /dev/null +++ b/solutions/cncf-generated/kyverno/kyverno-12117-fix-tests-result-expected-should-fail-a-test.json @@ -0,0 +1,56 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:24.047Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kyverno: fix(tests): result != expected should fail a test", + "description": "## Explanation\n\nKyverno [documentation](https://kyverno.io/docs/kyverno-cli/usage/test/) states:\n\n\"... If the actual result of the test, once executed, matches the desired result as defined in the test manifest, it will be scored as a pass in the command output. For example, if the specified result of a given test of a resource against a policy is declared to be a pass and the actual result when tested is also a pass, the command output will show as pass. 
If the actual result was instead a skip, the command output will show as fail because the two results do not agree.\"\n\nTherefore my assumption is: If the specified result of a given test of a resource against a policy is declared to be a **fail** and the actual result when tested is a **pass**, the command output should show as **fail** because the two results do not agree.\n\nSince https://github.com/kyverno/kyverno/pull/8212, this is no longer the case.\n\nThis PR addresses this, by failing tests if the result != expected.\n\n## Related is", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This PR fixes two tests that rely on wrong Kyverno behaviour.\n\nIn the case of `allowed-label-changes`:\n- test updating a non `breakglass` label should fail, but it succeeds. Prior to https://github.com/kyverno/kyverno/pull/12117 this is considered a `Pass` result.\n\nIn the case of `prepend-image-registry`:\n- I'm not entirely sure what the fail case for a mutating policy is, apart from the webhook being unavailable. 
We could probably fake it with a `resource.operation` set to something other than `CREATE` or `UPDATE`, but I've decided to delete the `result: fail` tests, and leave the tests checking if mutation happened correctly.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Loading test ( .kyverno-test/kyverno-test.yaml ) ...\r\n Loading values/variables ...\r\n Loading policies ...\r\n Loading resources ...\r\n Loading exceptions ...\r\n Applying 1 policy to 5 resources ...\r\n Checking results ...\r\n\r\n│────│─────────────────────────│────────────────────│───────────────│────────│─────────────────────│\r\n│ ID │ POLICY │ RULE │ RESOURCE │ RESULT │ REASON │\r\n│────│─────────────────────────│────────────────────│───────────────│────────│─────────────────────│\r\n│ 1 │ require-requests-limits │ validate-resources │ Pod/badpod01 │ Pass │ Ok │\r\n│ 2 │ require-requests-limits │ validate-resources │ Pod/badpod02 │ Pass │ Want fail, got pass │\r\n│ 3 │ require-requests-limits │ validate-resources │ Pod/badpod03 │ Pass │ Ok │\r\n│ 4 │ require-requests-limits │ validate-resources │ Pod/goodpod01 │ Pass │ Ok │\r\n│ 5 │ require-requests-limits │ validate-resources │ Pod/goodpod02 │ Pass │ Ok │\r\n│────│─────────────────────────│────────────────────│───────────────│────────│─────────────────────│\r\n\r\n\r\nTest Summary: 5 tests passed and 0 tests failed", + "Loading test ( .kyverno-test/kyverno-test.yaml ) ...\r\n Loading values/variables ...\r\n Loading policies ...\r\n Loading resources ...\r\n Loading exceptions ...\r\n Applying 1 policy to 5 resources ...\r\n Checking results ...\r\n\r\n│────│─────────────────────────│────────────────────│──────────────────────────│────────│─────────────────────│\r\n│ ID │ POLICY │ RULE │ RESOURCE │ RESULT │ REASON │\r\n│────│─────────────────────────│────────────────────│──────────────────────────│────────│─────────────────────│\r\n│ 1 │ 
require-requests-limits │ validate-resources │ v1/Pod/default/badpod01 │ Pass │ Ok │\r\n│ 2 │ require-requests-limits │ validate-resources │ v1/Pod/default/badpod02 │ Fail │ Want fail, got pass │\r\n│ 3 │ require-requests-limits │ validate-resources │ v1/Pod/default/badpod03 │ Pass │ Ok │\r\n│ 4 │ require-requests-limits │ validate-resources │ v1/Pod/default/goodpod01 │ Pass │ Ok │\r\n│ 5 │ require-requests-limits │ validate-resources │ v1/Pod/default/goodpod02 │ Pass │ Ok │\r\n│────│─────────────────────────│────────────────────│──────────────────────────│────────│─────────────────────│\r\n\r\n\r\nTest Summary: 4 tests passed and 1 tests failed\r\n\r\nAggregated Failed Test Cases :\r\n│────│─────────────────────────│────────────────────│─────────────────────────│────────│─────────────────────│\r\n│ ID │ POLICY │ RULE │ RESOURCE │ RESULT │ REASON │\r\n│────│─────────────────────────│────────────────────│─────────────────────────│────────│─────────────────────│\r\n│ 1 │ require-requests-limits │ validate-resources │ v1/Pod/default/badpod02 │ Fail │ Want fail, got pass │\r\n│────│─────────────────────────│────────────────────│─────────────────────────│────────│─────────────────────│\r\nError: 1 tests failed", + "apiVersion: v1\r\nkind: ConfigMap\r\nmetadata:\r\n name: roles-dictionary\r\n namespace: default\r\ndata:\r\n allowed-roles: \"[\\\"cluster-admin\\\", \\\"cluster-operator\\\", \\\"tenant-admin\\\"]\"" + ] + } + }, + "metadata": { + "tags": [ + "kyverno", + "incubating", + "security", + "cherry-pick-required", + "release-critical", + "size-xs", + "milestone-1-14-5" + ], + "category": "security", + "cncfProjects": [ + "kyverno" + ], + "targetResourceKinds": [ + "Pod", + "Configmap", + "Namespace", + "Role" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/kyverno/kyverno/pull/12117", + "sourceRepo": "kyverno/kyverno", + "reactions": 4, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:45:24.047Z", + "scannerVersion": 
"cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kyverno/kyverno-13635-feat-engine-support-platform-selection-in-imageregistry-context.json b/solutions/cncf-generated/kyverno/kyverno-13635-feat-engine-support-platform-selection-in-imageregistry-context.json new file mode 100644 index 00000000..141c740b --- /dev/null +++ b/solutions/cncf-generated/kyverno/kyverno-13635-feat-engine-support-platform-selection-in-imageregistry-context.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:31.648Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kyverno: feat(engine): support platform selection in imageRegistry context", + "description": "## Explanation\n\nThis pull request fixes a bug where policies using the `imageRegistry` context would fail when processing container images built exclusively for non-`amd64` architectures, such as `linux/arm64`. The failure occurred because Kyverno would default to requesting the `linux/amd64` platform from the image index, causing an error if that platform was not present.\n\nThis PR resolves the issue by introducing a new feature: an optional `platform` field in the `imageRegistry` context. 
This addition allows policy authors to explicitly specify the target platform (e.g., \"linux/arm64\") for image lookups, enabling correct metadata resolution for single-architecture and multi-architecture images.\n\n## Related issue\n\nCloses \\#13591\n\n## Documentation (required for features)\n\nMy PR contains new or altered behavior to Kyverno.\n\n - [ ] I have sent the draft PR to add or update [the documentation](https://github.com/kyverno/website) and the link is:\n *(Note: A documentation PR will be cre", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Linter failed https://github.com/kyverno/kyverno/actions/runs/16774872374/job/47498445394?pr=13635.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "apiVersion: kyverno.io/v1\r\nkind: ClusterPolicy\r\nmetadata:\r\n name: replace-image-registry-with-harbor\r\nspec:\r\n rules:\r\n - name: redirect-docker\r\n match:\r\n any:\r\n - resources:\r\n kinds:\r\n - Pod\r\n mutate:\r\n foreach:\r\n - list: request.object.spec.containers\r\n context:\r\n - name: imageData\r\n imageRegistry:\r\n reference: \"{{ element.image }}\"\r\n # The new platform field is used here to target the ARM64 image\r\n platform: \"linux/arm64\"\r\n patchStrategicMerge:\r\n spec:\r\n containers:\r\n - name: \"{{ element.name }}\"\r\n image: harbor.example.com/k8s/{{imageData.repository}}:{{imageData.identifier}}", + "apiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n name: arm\r\nspec:\r\n containers:\r\n - image: registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:arm64-v18.1.0\r\n name: helper-arm" + ] + } + }, + "metadata": { + "tags": [ + "kyverno", + "incubating", + "security", + "size-xl" + ], + "category": "security", + "cncfProjects": [ + "kyverno" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kyverno/kyverno/pull/13635", + "sourceRepo": 
"kyverno/kyverno", + "reactions": 2, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:45:31.648Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kyverno/kyverno-1930-add-special-variable-substitution-logic-for-preconditions.json b/solutions/cncf-generated/kyverno/kyverno-1930-add-special-variable-substitution-logic-for-preconditions.json new file mode 100644 index 00000000..62453c43 --- /dev/null +++ b/solutions/cncf-generated/kyverno/kyverno-1930-add-special-variable-substitution-logic-for-preconditions.json @@ -0,0 +1,54 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:30.438Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kyverno: add special variable substitution logic for preconditions", + "description": "Signed-off-by: Max Goncharenko \n\n## Related issue\nFixes #1810\n\n## What type of PR is this\n\n/kind bug\n\n## Proposed Changes\n- Remove variable substitution logic from preconditions evaluation\n- Put special variable substitution logic before preconditions evaluation\n- Special means \"do not fail on error\" as described in expected result of the issue\n\n### Proof Manifests\n```yaml\napiVersion: kyverno.io/v1\nkind: ClusterPolicy\nmetadata:\n name: set-service-labels\n annotations:\n pod-policies.kyverno.io/autogen-controllers: none\nspec:\n background: false\n rules:\n - name: set-service-labels-pods\n match:\n resources:\n kinds:\n - Pod\n exclude:\n resources:\n namespaces:\n - \"kube*\"\n - \"openshift*\"\n - \"kube-*\"\n - \"openshift-*\"\n preconditions:\n any:\n - key: \"{{ request.object.metadata.labels.serviceOverride }}\"\n operator: NotEquals\n value: \"*?\"\n ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Hi @kacejot - any doubts regarding the follow-up items above? Can you please resolve conflicts? 
Thanks.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "apiVersion: kyverno.io/v1\r\nkind: ClusterPolicy\r\nmetadata:\r\n name: set-service-labels\r\n annotations:\r\n pod-policies.kyverno.io/autogen-controllers: none\r\nspec:\r\n background: false\r\n rules:\r\n - name: set-service-labels-pods\r\n match:\r\n resources:\r\n kinds:\r\n - Pod\r\n exclude:\r\n resources:\r\n namespaces:\r\n - \"kube*\"\r\n - \"openshift*\"\r\n - \"kube-*\"\r\n - \"openshift-*\"\r\n preconditions:\r\n any:\r\n - key: \"{{ request.object.metadata.labels.serviceOverride }}\"\r\n operator: NotEquals\r\n value: \"*?\"\r\n mutate:\r\n patchStrategicMerge:\r\n metadata:\r\n labels:\r\n +(service): \"{{ request.object.metadata.labels.app }}\"\r\n spec:\r\n containers:\r\n - (name): \"*\"\r\n env:\r\n - name: \"SERVICE\"\r\n value: \"{{ request.object.metadata.labels.app }}\"\r\n - name: set-service-labels-deployments-and-sets\r\n match:\r\n resources:\r\n kinds:\r\n - Deployment\r\n - DaemonSet\r\n - StatefulSet\r\n exclude:\r\n resources:\r\n namespaces:\r\n - \"kube*\"\r\n - \"openshift*\"\r\n - \"kube-*\"\r\n - \"openshift-*\"\r\n preconditions:\r\n any:\r\n - key: \"{{ request.object.metadata.labels.serviceOverride }}\"\r\n operator: NotEquals\r\n value: \"*?\"\r\n mutate:\r\n patchStrategicMerge:\r\n spec:\r\n template:\r\n metadata:\r\n labels:\r\n +(service): \"{{ request.object.spec.template.metadata.labels.app }}\"\r\n spec:\r\n containers:\r\n - (name): \"*\"\r\n env:\r\n - name: \"SERVICE\"\r\n value: \"{{ request.object.spec.template.metadata.labels.app }}\"", + "kind: Deployment\r\napiVersion: apps/v1\r\nmetadata:\r\n name: audit-deployment\r\n labels:\r\n app: audit\r\n name: audit\r\n service: audit\r\nspec:\r\n replicas: 1\r\n selector:\r\n matchLabels:\r\n app: audit\r\n template:\r\n metadata:\r\n creationTimestamp: null\r\n labels:\r\n app: audit\r\n service: audit\r\n spec:\r\n 
containers:\r\n - name: audit\r\n image: busybox\r\n resources:\r\n limits:\r\n cpu: 2500m\r\n memory: 3584Mi\r\n requests:\r\n cpu: 1500m\r\n memory: 2Gi\r\n imagePullPolicy: Always\r\n restartPolicy: Always\r\n terminationGracePeriodSeconds: 30\r\n dnsPolicy: ClusterFirst\r\n securityContext: {}", + "---\r\napiVersion: kyverno.io/v1\r\nkind: ClusterPolicy\r\nmetadata:\r\n name: set-service-labels\r\n annotations:\r\n pod-policies.kyverno.io/autogen-controllers: none\r\nspec:\r\n background: false\r\n rules:\r\n - name: set-service-labels-pods\r\n match:\r\n resources:\r\n kinds:\r\n - Pod\r\n exclude:\r\n resources:\r\n namespaces:\r\n - \"kube*\"\r\n - \"openshift*\"\r\n - \"kube-*\"\r\n - \"openshift-*\"\r\n preconditions:\r\n any:\r\n - key: \"{{ request.object.metadata.labels.serviceOverride }}\"\r\n operator: NotEquals\r\n value: \"*\"\r\n mutate:\r\n patchStrategicMerge:\r\n metadata:\r\n labels:\r\n +(service): \"{{ request.object.metadata.labels.app }}\"\r\n spec:\r\n containers:\r\n - (name): \"*\"\r\n env:\r\n - name: \"SERVICE\"\r\n value: \"{{ request.object.metadata.labels.app }}\"\r\n - name: set-service-labels-deployments-and-sets\r\n match:\r\n resources:\r\n kinds:\r\n - Deployment\r\n - DaemonSet\r\n - StatefulSet\r\n exclude:\r\n resources:\r\n namespaces:\r\n - \"kube*\"\r\n - \"openshift*\"\r\n - \"kube-*\"\r\n - \"openshift-*\"\r\n preconditions:\r\n any:\r\n - key: \"{{ request.object.metadata.labels.serviceOverride }}\"\r\n operator: NotEquals\r\n value: \"*?\"\r\n mutate:\r\n patchStrategicMerge:\r\n spec:\r\n template:\r\n metadata:\r\n labels:\r\n +(service): \"{{ request.object.spec.template.metadata.labels.app }}\"\r\n spec:\r\n containers:\r\n - (name): \"*\"\r\n env:\r\n - name: \"SERVICE\"\r\n value: \"{{ request.object.spec.template.metadata.labels.app }}\"" + ] + } + }, + "metadata": { + "tags": [ + "kyverno", + "incubating", + "security" + ], + "category": "security", + "cncfProjects": [ + "kyverno" + ], + "targetResourceKinds": 
[ + "Pod", + "Deployment", + "Service", + "Statefulset", + "Daemonset", + "Namespace" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kyverno/kyverno/pull/1930", + "sourceRepo": "kyverno/kyverno", + "reactions": 2, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:45:30.438Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kyverno/kyverno-4461-feat-use-standard-selector-for-validationfailureactionoverrides.json b/solutions/cncf-generated/kyverno/kyverno-4461-feat-use-standard-selector-for-validationfailureactionoverrides.json new file mode 100644 index 00000000..7db98293 --- /dev/null +++ b/solutions/cncf-generated/kyverno/kyverno-4461-feat-use-standard-selector-for-validationfailureactionoverrides.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:25.280Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kyverno: feat: Use standard selector for validationFailureActionOverrides", + "description": "Signed-off-by: Eileen \n\n## Explanation\n\nThis PR attempts to resolve https://github.com/kyverno/kyverno/issues/4254.\n\nModify ValidationFailureActionOverrides\n- Add `NamespaceSelector`\n- Generate relative manifests\n- Implement namespace labels matching logic in engineResponse\n- Add test cases\n\n## Related issue\n\nhttps://github.com/kyverno/kyverno/issues/4254\n\n## Milestone of this PR\n\n## What type of PR is this\n\n## Proposed Changes\n\nConvert `ValidationFailureActionOverrides` spec:\n- Add `NamespaceSelector` ([k8s/metav1.LabelSelector](https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#LabelSelector))\n- The new field should be functional in parallel with current `Namespaces`\n\n### Proof Manifests\n\n```yaml\n validationFailureActionOverrides:\n - action: audit # Action to apply\n namespaces:\n - dev\n namespaceSelector: # List of affected 
namespaces\n matchExpressions:\n - key: \"kubernetes.io/metadata.name\"\n operator: ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@Eileen-Yu - thanks for the contribution!\n\nWe need to maintain backward compatibility while introducing new fields. \n\nCan we keep both `namespaces` and `namespaceSelector` and apply both checks if they are defined in the same action?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "validationFailureActionOverrides:\r\n - action: audit # Action to apply\r\n namespaces:\r\n - dev\r\n namespaceSelector: # List of affected namespaces\r\n matchExpressions:\r\n - key: \"kubernetes.io/metadata.name\"\r\n operator: In\r\n values:\r\n - \"default\"", + "apiVersion: v1\r\nkind: ConfigMap\r\nmetadata:\r\n name: roles-dictionary\r\n namespace: default\r\ndata:\r\n allowed-roles: \"[\\\"cluster-admin\\\", \\\"cluster-operator\\\", \\\"tenant-admin\\\"]\"", + "name: prepend-image-registry\r\npolicies:\r\n - prepend_image_registry.yaml\r\nresources:\r\n - resource.yaml\r\nvariables: values.yaml\r\nresults:\r\n - policy: prepend-registry\r\n rule: prepend-registry-containers\r\n resource: mypod\r\n # if mutate rule\r\n patchedResource: patchedResource01.yaml\r\n kind: Pod\r\n result: pass" + ] + } + }, + "metadata": { + "tags": [ + "kyverno", + "incubating", + "security" + ], + "category": "security", + "cncfProjects": [ + "kyverno" + ], + "targetResourceKinds": [ + "Pod", + "Configmap", + "Namespace", + "Role" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kyverno/kyverno/pull/4461", + "sourceRepo": "kyverno/kyverno", + "reactions": 2, + "comments": 27 + }, + "security": { + "scannedAt": "2026-02-27T17:45:25.280Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kyverno/kyverno-6248-extends-foreach-to-cover-generate-rules.json 
b/solutions/cncf-generated/kyverno/kyverno-6248-extends-foreach-to-cover-generate-rules.json new file mode 100644 index 00000000..6e068752 --- /dev/null +++ b/solutions/cncf-generated/kyverno/kyverno-6248-extends-foreach-to-cover-generate-rules.json @@ -0,0 +1,54 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:29.177Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kyverno: Extends foreach to cover generate rules", + "description": "## Explanation\nExtends foreach to cover generate rules.\n\n## Related issue\nfixes #3542\nCloses #4352\n\n## Milestone of this PR\n\nDone 👍🏽\n## What type of PR is this\n/kind feature\n\n## Proposed Changes\nAfter merging this PR, A single \"trigger\" resource will result in the creation of multiple downstream resources for an example Creation of a pod could result in generation of multiple downstream resources like configMap, secret etc.\n\n### Proof Manifests\nIf the pollicy given below is applied then multiple configMap will get created on the creation of a pod\n```yaml\napiVersion: kyverno.io/v1\nkind: ClusterPolicy\nmetadata:\n name: basic-policy\nspec:\n rules:\n - name: test\n match:\n any:\n - resources:\n kinds:\n - Pod\n generate:\n foreach:\n - list: \"request.object.spec.containers\"\n generateResources:\n - kind: ConfigMap\n apiVersion: v1\n name: \"custom-created-configmap\"\n namespace: \"{", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## Explanation\n\nExtends foreach to cover generate rules.\n\n## Related issue\nFixes #3542\n\n## Milestone of this PR\nDone 👍🏽 \n## What type of PR is this\n/kind feature\n\n## Proposed Changes\n\nAfter merging this PR, A single \"trigger\" resource will result in the creation of multiple downstream resources for an example Creation of a pod could result in generation of multiple configMap.\n\n### Proof Manifests\n\nIf the pollicy given below is applied then multiple 
configMap will get created on the creation of a pod:\n\n```yaml\napiVersion: kyverno.io/v1\nkind: ClusterPolicy\nmetadata:\n name: basic-policy2\nspec:\n rules:\n - name: test-foreach-generate\n match:\n any:\n - resources:\n kinds:\n - Pod\n generate:\n foreach:\n - list: \"request.object.spec.containers\"\n kind: ConfigMap\n apiVersion: v1\n name: \"custom-created-configmap\"\n namespace: \"{{request.object.metadata.namespace}}\"\n synchronize: false\n data:\n data:\n foo: \"bar is my container name\"\n - list: \"request.object.spec.containers\"\n kind: ConfigMap\n apiVersion: v1\n name: \"{{element.name}}-config\"\n namespace: \"{{request.object.metadata.namespace}}\"\n synchronize: false\n data:\n data:\n foo: \"{{element.name}} is my container name\"\n - list: \"request.object.spec.containers\"\n apiVersion: v1\n kind: ConfigMap\n name: \"foreachgen-test\"\n namespace: \"default", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "apiVersion: kyverno.io/v1\r\nkind: ClusterPolicy\r\nmetadata:\r\n name: basic-policy\r\nspec:\r\n rules:\r\n - name: test\r\n match:\r\n any:\r\n - resources:\r\n kinds:\r\n - Pod\r\n generate:\r\n foreach:\r\n - list: \"request.object.spec.containers\"\r\n generateResources:\r\n - kind: ConfigMap\r\n apiVersion: v1\r\n name: \"custom-created-configmap\"\r\n namespace: \"{{request.object.metadata.namespace}}\"\r\n synchronize: false\r\n data:\r\n data:\r\n foo: \"{{element.name}} is my container name\"\r\n - kind: Secret\r\n apiVersion: v1\r\n name: \"custom-created-secret\"\r\n namespace: \"{{request.object.metadata.namespace}}\"\r\n synchronize: false\r\n data:\r\n data:\r\n extra: YmFyCg==", + "apiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n name: static-web\r\n labels:\r\n role: myrole\r\nspec:\r\n containers:\r\n - name: web\r\n image: nginx\r\n ports:\r\n - name: web\r\n containerPort: 80\r\n protocol: TCP", + "apiVersion: kyverno.io/v1\r\nkind: 
ClusterPolicy\r\nmetadata:\r\n name: basic-policy-cloned\r\nspec:\r\n rules:\r\n - name: test\r\n match:\r\n any:\r\n - resources:\r\n kinds:\r\n - Pod\r\n generate:\r\n foreach:\r\n - list: \"request.object.spec.containers\"\r\n generateResources:\r\n - kind: ConfigMap\r\n apiVersion: v1\r\n name: \"{{element.name}}\"\r\n namespace: \"{{request.object.metadata.namespace}}\"\r\n synchronize: false\r\n clone:\r\n namespace: default\r\n name: custom-created-configmap\r\n - kind: Secret\r\n apiVersion: v1\r\n name: \"custom-created-secret-1\"\r\n namespace: \"{{request.object.metadata.namespace}}\"\r\n synchronize: false\r\n clone:\r\n namespace: default\r\n name: custom-created-secret" + ] + } + }, + "metadata": { + "tags": [ + "kyverno", + "incubating", + "security", + "stale" + ], + "category": "security", + "cncfProjects": [ + "kyverno" + ], + "targetResourceKinds": [ + "Pod", + "Configmap", + "Secret", + "Namespace", + "Role" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/kyverno/kyverno/pull/6248", + "sourceRepo": "kyverno/kyverno", + "reactions": 2, + "comments": 32 + }, + "security": { + "scannedAt": "2026-02-27T17:45:29.177Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kyverno/kyverno-9883-feat-add-manifest-index-to-imageregistry-context.json b/solutions/cncf-generated/kyverno/kyverno-9883-feat-add-manifest-index-to-imageregistry-context.json new file mode 100644 index 00000000..b9c20da2 --- /dev/null +++ b/solutions/cncf-generated/kyverno/kyverno-9883-feat-add-manifest-index-to-imageregistry-context.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:33.425Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "kyverno: feat: Add Manifest Index to ImageRegistry context", + "description": "## Explanation\nThis PR adds the missing [OCI Image 
Index](https://github.com/opencontainers/image-spec/blob/main/image-index.md) to `imageRegistry` context, it adds the `imageIndex` field that contains the index. \nImage Index can be used to evaluate an image's supported platforms.\n\n## Related issue\nCloses #8273 @chipzoller \n\n## Milestone of this PR\n\n/milestone 1.13.0\n## Documentation (required for features)\n\nMy PR contains new or altered behavior to Kyverno. \n- [X] I have sent the draft PR to add or update [the documentation](https://github.com/kyverno/website) and the link is:\n \n https://github.com/kyverno/website/pull/1182\n\n## What type of PR is this\n\n/kind feature\n\n## Proposed Changes\nFetching `imageIndex` from image registry and output its raw JSON, same way `manifest` and `config` are being handled\n\n### Proof Manifests\n\nTo test this PR, create `cpol.yaml`:\n```yaml\napiVersion: kyverno.io/v1\nkind: ClusterPolicy\nmetadata:\n name: add-tolerations\nspec:\n rules:\n - context:\n ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## Related issue #\nIssue - kyverno/kyverno#8273\nPR - kyverno/kyverno#9883\n\n## Proposed Changes\nAdd `imageIndex` to `imageData` documentation.\n\n## Checklist\n\n- [X] I have read the [contributing guidelines](https://github.com/kyverno/website/blob/main/CONTRIBUTING.md).\n- [X] I have inspected the website preview for accuracy.\n- [X] I have signed off my issue.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "apiVersion: kyverno.io/v1\r\nkind: ClusterPolicy\r\nmetadata:\r\n name: add-tolerations\r\nspec:\r\n rules:\r\n - context:\r\n - imageRegistry:\r\n jmesPath: manifestIndex.manifests[?platform.architecture == 'arm64'] | length(@)\r\n reference: '{{ request.object.spec.containers[0].image }}'\r\n name: imageARMPlatform\r\n match:\r\n any:\r\n - resources:\r\n kinds:\r\n - Pod\r\n mutate:\r\n patchesJson6902: |-\r\n - op: add\r\n path: 
\"/spec/tolerations/-\"\r\n value:\r\n key: kubernetes.io/arch\r\n operator: Equal\r\n value: arm64\r\n effect: NoSchedule\r\n name: pod-tolerations\r\n preconditions:\r\n all:\r\n - key: \"{{ imageARMPlatform || `0` }}\"\r\n operator: Equals\r\n value: 1\r\n validationFailureAction: audit", + "apiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n name: mypod\r\nspec:\r\n containers:\r\n - name: web\r\n image: nginx", + "apiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n name: mypod\r\nspec:\r\n containers:\r\n - name: web\r\n image: nginx\r\n tolerations:\r\n - key: kubernetes.io/arch\r\n operator: Equal\r\n value: arm64\r\n effect: NoSchedule" + ] + } + }, + "metadata": { + "tags": [ + "kyverno", + "incubating", + "security", + "cherry-pick-required", + "cherry-pick-completed", + "size-l" + ], + "category": "security", + "cncfProjects": [ + "kyverno" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/kyverno/kyverno/pull/9883", + "sourceRepo": "kyverno/kyverno", + "reactions": 2, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:45:33.426Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/lima/lima-1054-implement-the-snapshot-commands.json b/solutions/cncf-generated/lima/lima-1054-implement-the-snapshot-commands.json new file mode 100644 index 00000000..02bca799 --- /dev/null +++ b/solutions/cncf-generated/lima/lima-1054-implement-the-snapshot-commands.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:36.295Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "lima: Implement the snapshot commands", + "description": "For making qcow2 snapshots of the running virtual machine.\n\nIt is possible to make multiple snapshots, in the diffdisk.\n\nCloses #1051\n\n----\n\n`limactl start`\n\n`limactl snapshot create default --tag snap`\n\n`limactl 
snapshot list default`\n\n`limactl snapshot apply default --tag snap`\n\n`limactl stop`\n\n`limactl snapshot delete default --tag snap`", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Allows linking only the hostagent with the driver libraries, while keeping the limactl smaller and faster to start up.\n\nProbably needs to live in \"libexec\" or something like that ? And maybe the subcommand should be renamed, too.\n\nCloses #1209", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "$ _output/bin/limactl backup --tag tag alpine\r\nINFO[0000] Sending HMP savevm command \r\n$ _output/bin/limactl restore --list alpine\r\nSnapshot list:\r\nID TAG VM SIZE DATE VM CLOCK\r\n1 tag 274 MiB 2022-09-21 19:34:50 00:02:39.937\r\n$ _output/bin/limactl backup --tag tag --rm alpine\r\nINFO[0000] Sending HMP delvm command \r\n$ _output/bin/limactl restore --list alpine\r\n$", + "TEST| [INFO] Testing online snapshots\r\ntime=\"2022-11-16T20:21:20Z\" level=info msg=\"Sending HMP savevm command\"\r\nTEST| [INFO] snapshot list: expected=snap1 got=\r\nError: ERROR] snapshot list did not return expected value", + "+ set +x\r\nTEST| [INFO] Testing online snapshots\r\ntime=\"2023-05-09T05:21:18Z\" level=info msg=\"Sending HMP savevm command\"\r\ntime=\"2023-05-09T05:21:26Z\" level=info msg=\"Sending HMP info command\"\r\nTEST| [INFO] snapshot list: expected=snap1 got=snap1\r\ntime=\"2023-05-09T05:21:26Z\" level=info msg=\"Sending HMP loadvm command\"\r\n\r\nTimed out!" 
+ ] + } + }, + "metadata": { + "tags": [ + "lima", + "incubating", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "lima" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/lima-vm/lima/pull/1054", + "sourceRepo": "lima-vm/lima", + "reactions": 6, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:45:36.295Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/lima/lima-1147-support-for-virtualization-framework-for-macos-13.json b/solutions/cncf-generated/lima/lima-1147-support-for-virtualization-framework-for-macos-13.json new file mode 100644 index 00000000..3e06129d --- /dev/null +++ b/solutions/cncf-generated/lima/lima-1147-support-for-virtualization-framework-for-macos-13.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:38.717Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "lima: Support for Virtualization.Framework for macOS 13", + "description": "This PR provides support for using Virtualization.Framework as a optional driver.\n\nThe following are the changes done related to using drivers,\n- [x] Driver interface\n- [x] Migrate current QEMU implementation to driver\n- [x] New driver vz for Virtualization.Framework\n\nThe following are the features of lima, the vz driver should provide support for below,\n- [x] Running VM (Using both disk img and iso)*\n- [x] Slirp network for guest to host communication (uses gvisor-tap-vsock)\n- [x] Host to guest network (uses vz NAT gateway)*\n- [x] Directory sharing, supports reverse-sshfs and virtiofs (newly added)\n- [x] Port forwarding\n- [x] Host DNS resolver\n- [ ] Display (Deferring it for now as it requires runtime.LockOsThread() to be called in the beginning of hostagent start cmd)\n\n**Notes**\n- Vz driver internally converts qcow to raw image using 
(qemu-img convert). This is because vz only supports raw disk\n- Need to provide yaml configuration for NAT\n\n**Know Issues**\n- Serial log doesn't contain", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Done testing with all the templates,\nThere are 2 open issues that we might need to address before merging,\n1. templates with `legacyBoot: true` is not working with vz driver. If any pointer please do suggest.\n2. kernel boot logs are not coming via serial attachment with UEFI. @Code-Hex, please do suggest if am missing something.", + "steps": [ + "templates with `legacyBoot: true` is not working with vz driver. If any pointer please do suggest.", + "kernel boot logs are not coming via serial attachment with UEFI. @Code-Hex, please do suggest if am missing something." + ], + "codeSnippets": [ + "{\"level\":\"fatal\",\"msg\":\"stat /tmp/lima: no such file or directory\",\"time\":\"2022-11-12T00:19:02+09:00\"}", + "$ limactl start vz\r\nINFO[0000] Using the existing instance \"vz\" \r\nINFO[0000] Attempting to download the nerdctl archive from \"https://github.com/containerd/nerdctl/releases/download/v1.0.0/nerdctl-full-1.0.0-linux-amd64.tar.gz\" digest=\"sha256:b7f76a3bf1b8161eb94ebe885945feb2887dfc0d274f9da908a17bc0ef853eb9\"\r\nINFO[0000] Using cache \"/Users/suda/Library/Caches/lima/download/by-url-sha256/86e8280c3d639367efe7a50660ecfc4eade10b1696a9deeba27fdbf086d11098/data\" \r\nINFO[0001] [hostagent] Starting VZ (hint: to watch the boot progress, see \"/Users/suda/.lima/vz/serial.log\") \r\nINFO[0001] SSH Local Port: 57795 \r\nINFO[0001] [hostagent] new connection from to \r\nINFO[0001] [hostagent] Waiting for the essential requirement 1 of 3: \"ssh\" \r\nINFO[0001] [hostagent] [VZ] - vm state change: running \r\nINFO[0004] [hostagent] 2022/11/12 00:20:41 tcpproxy: for incoming conn 127.0.0.1:57797, error dialing \"192.168.5.15:22\": connect tcp 192.168.5.15:22: no route \r\nINFO[0015] [hostagent] Waiting for the essential 
requirement 1 of 3: \"ssh\" \r\nINFO[0015] [hostagent] 2022/11/12 00:20:52 tcpproxy: for incoming conn 127.0.0.1:57804, error dialing \"192.168.5.15:22\": connect tcp 192.168.5.15:22: connection was refused \r\nINFO[0025] [hostagent] Waiting for the essential requirement 1 of 3: \"ssh\" \r\nINFO[0026] [hostagent] The essential requirement 1 of 3 is satisfied \r\nINFO[0026] [hostagent] Waiting for the essential requirement 2 of 3: \"user session is ready for ssh\" \r\nERRO[0033] [hostagent] r.CreateEndpoint() = connection was refused \r\nERRO[0033] [hostagent] r.CreateEndpoint() = connection was refused \r\nINFO[0041] [hostagent] Waiting for the essential requirement 2 of 3: \"user session is ready for ssh\" \r\nINFO[0041] [hostagent] The essential requirement 2 of 3 is satisfied \r\nINFO[0041] [hostagent] Waiting for the essential requirement 3 of 3: \"the guest agent to be running\" \r\nINFO[0041] [hostagent] The essential requirement 3 of 3 is satisfied \r\nINFO[0041] [hostagent] Waiting for the optional requirement 1 of 2: \"systemd must be available\" \r\nINFO[0042] [hostagent] Forwarding \"/run/lima-guestagent.sock\" (guest) to \"/Users/suda/.lima/vz/ga.sock\" (host) \r\nINFO[0042] [hostagent] The optional requirement 1 of 2 is satisfied \r\nINFO[0042] [hostagent] Waiting for the optional requirement 2 of 2: \"containerd binaries to be installed\" \r\nINFO[0042] [hostagent] Not forwarding TCP [::]:22 \r\nINFO[0042] [hostagent] Not forwarding TCP 0.0.0.0:22 \r\nINFO[0042] [hostagent] Not forwarding TCP 127.0.0.53:53 \r\nERRO[0049] [hostagent] r.CreateEndpoint() = connection was refused \r\nINFO[0063] [hostagent] The optional requirement 2 of 2 is satisfied \r\nINFO[0063] [hostagent] Waiting for the final requirement 1 of 1: \"boot scripts must have finished\" \r\nINFO[0075] [hostagent] The final requirement 1 of 1 is satisfied \r\nINFO[0075] READY. 
Run `limactl shell vz` to open the shell.", + "`The vz tempalte didn't start up without mkdiring /tmp/lima`\r\nvz expects all mounting folders to be present, i will for now remove /tmp/lima from the template. In a follow-up we may look to ignore mounts with warning if not present\r\n\r\n`ERRO[0033] [hostagent] r.CreateEndpoint() = connection was refused errors in the limactl log`\r\nThis is actually from gvisor-tap-vsock (https://github.com/containers/gvisor-tap-vsock/blob/main/pkg/services/forwarder/tcp.go#L44). As far as i checked it is thrown as error but doesn't break network connection.\r\n\r\n`Writing a file on the (read-only) home fails with a cryptic error. I expect this to fail with EROFS`\r\nWill look into it. Maybe missing some mount option. Will fix it (If able to) in a separate PR. \r\n\r\n\n> In a follow-up we may look to ignore mounts with warning if not present\r\n\r\nFor consistency with QEMU, it should be automatically mkdired\n@jandubois \r\nCould you take a look?\r\nThis is being added as an experimental feature, so the implementation can be revised later at any time, but I'd like to make sure that we are fine with the YAML property (Just `vmType: \"vz\"`)\nTo be clear, this doesn't enable a Rosetta directory share yet right? 
Ie: https://github.com/Code-Hex/vz/blob/9de1c1041d5e560f46f9f94fc499a1cc72fd92e5/example/gui-linux/rosseta_directory_share_arm64.go#LL14C15-L14C15\nMinor nit, logs mention QEMU when doing a delete (forced or not):" + ] + } + }, + "metadata": { + "tags": [ + "lima", + "incubating", + "app-definition", + "impact-changelog", + "component-vz" + ], + "category": "workloads", + "cncfProjects": [ + "lima" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/lima-vm/lima/pull/1147", + "sourceRepo": "lima-vm/lima", + "reactions": 5, + "comments": 21 + }, + "security": { + "scannedAt": "2026-02-27T17:45:38.717Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/lima/lima-1359-add-builtin-yq-support-to-lima-start-command.json b/solutions/cncf-generated/lima/lima-1359-add-builtin-yq-support-to-lima-start-command.json new file mode 100644 index 00000000..84d6dde6 --- /dev/null +++ b/solutions/cncf-generated/lima/lima-1359-add-builtin-yq-support-to-lima-start-command.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:47.401Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "lima: Add builtin yq support to lima start command", + "description": "Allows modifying the yaml template \"inplace\" for a single instance,\nif you don't want to change the default or override for all instances.\n\nThis is similar to running `yq` on the template, from the command-line.\nExcept it does it on a temporary file, and for all different kinds of templates.\n\n```console\n$ yq < examples/default.yaml > default.yaml\n$\n```\n\nCurrently it removes empty lines, some quirk with the yaml parser used.\nIt also seems to change the indentation on some comments (only), but OK:\n\n```console\n$ diff -B -w examples/default.yaml default.yaml\n$\n```\n\nThis can be used as a general 
fallback, similar to `jq`, for modifying templates.\nInstead of adding the usual `--cpus` `--memory` `--arch`, or other parameters ?\n\n----\n\nSee for details\n\n* #545\n\n`limactl start config.yaml --yq '.arch = \"aarch64\"'`\n\nIt is not YAML syntax, but it is rather straightforward.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Also add` --tty` parameter, similar to the start command, for avoiding user interaction asking whether to start it.\n\n```console\n$ limactl edit --tty=false --set \".cpus = 2\" default\nINFO[0000] Instance \"default\" configuration edited \n$ limactl edit --tty=false --set \".cpus = 2\" default\nINFO[0000] Aborting, no changes made to the instance \n```\n\nFollow-up to:\n* #1359", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Currently it removes empty lines, some quirk with the yaml parser used.\r\nIt also seems to change the indentation on some comments (only), but OK:", + "This can be used as a general fallback, similar to `jq`, for modifying templates.\r\nInstead of adding the usual `--cpus` `--memory` `--arch`, or other parameters ?\r\n\r\n----\r\n\r\nSee for details\r\n\r\n* #545\r\n\r\n`limactl start config.yaml --yq '.arch = \"aarch64\"'`\r\n\r\nIt is not YAML syntax, but it is rather straightforward.\nAlso add` --tty` parameter, similar to the start command, for avoiding user interaction asking whether to start it.", + "Follow-up to:\r\n* #1359\n> Instead of adding the usual --cpus --memory --arch, or other parameters ?\r\n\r\nWe should add them too, for typechecking, shell completion, etc. (In separate PRs)\r\n\n> It also makes me a little sad that this little feature increases the size of the `limactl` binary by 20% (20MB → 24MB), but I guess that can't be helped, and doesn't really matter when you compare it to the size of QEMU. 
smile\r\n\r\nI noticed that as well.\r\n\r\nIt would be possible to make a more conservative option, either some special-case for cpus/memory/arch/etc.\r\nOr doing the `--set` approach, that would create a special override.yaml just for the particular instance.\r\n\r\nThis feature (`yq`) as such has a lot of \"overkill\" in it, like using `jq` (or even go templates) to change the list output...\nThe binary footprint seems acceptable for me\r\n\r\n\n> It would be possible to make a more conservative option,\r\n\r\nNo, I think I would prefer to have the expressiveness of `yq` expressions. I was just commenting that I noticed the size increase, but over time bloat is inevitable, and I think most users won't care (or even notice).\r\n\r\nIdle curiosity: `yq` is 30× the size of `jq`" + ] + } + }, + "metadata": { + "tags": [ + "lima", + "incubating", + "app-definition", + "enhancement", + "impact-changelog", + "area-cli" + ], + "category": "workloads", + "cncfProjects": [ + "lima" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/lima-vm/lima/pull/1359", + "sourceRepo": "lima-vm/lima", + "reactions": 2, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:45:47.401Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/lima/lima-188-replace-reverse-sshfs-with-samba.json b/solutions/cncf-generated/lima/lima-188-replace-reverse-sshfs-with-samba.json new file mode 100644 index 00000000..1b999565 --- /dev/null +++ b/solutions/cncf-generated/lima/lima-188-replace-reverse-sshfs-with-samba.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:45.718Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "lima: Replace reverse SSHFS with Samba", + "description": "🔴 Current blocker: nls_utf8.ko is missing in openSUSE 15.3 
https://bugzilla.opensuse.org/show_bug.cgi?id=1190797\n🔴 I also have to rewrite this PR to make Samba non-default (https://github.com/lima-vm/lima/pull/188#discussion_r718677030)\n\n- - -\nReplace previous PR #118\nFix #20 (`Filesystem sharing`)\n\nSee the changes of `docs/internal.md` for the design.\n\n- On macOS hosts, `/usr/local/sbin/samba-dot-org-smbd` is used as the `smbd` binary.\n This binary can be installed with `brew install samba`.\n Apple's version of `/usr/sbin/smbd` cannot be used.\n The binary path can be overridden with `$SMBD` env var.\n\n- smbd is connected to QEMU via smb's stdio.\n The samba address 192.168.5.4:445 is only accessible from the guest, not from the host.\n\n - When the host's hostname is not present in `/etc/hosts` on the host filesystem, the Lima hostagent launches a mDNS to help looking up the hostname.\n Otherwise starting Samba takes 25 secs with \"getaddrinfo failed\" error.\n See the `pkg/samba", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Sambas are not automatically mounted yet. `mount -t cifs -o credentials=/tmp/credentials //192.168.5.4/lima-0 /mnt/tmp-0`\n\nFIXME: mount takes 25 secs, due to hostname resolution errors\n```\n[2021/07/21 18:47:12.871531, 3] ../../lib/util/util_net.c:257(interpret_string_addr_internal)\n interpret_string_addr_internal: getaddrinfo failed for name suda-mbp.local (flags 1026) [nodename nor servname provided, or not known]\n[2021/07/21 18:47:12.871626, 3] ../../source3/lib/util_sock.c:1026(get_mydnsfullname)\n get_mydnsfullname: getaddrinfo failed for name suda-mbp.local [Unknown error]\n```\n\nA workaround is to add `127.0.0.1 localhost suda-mbp.local` to `/etc/hosts` on the host, but that requires sudo.\n\n- - -\n\nTODOs:\n- supply the credential to the guest\n - via serial? 
via ISO?\n - Using an ACPI table is not an option, because ACPI is N/A for qemu-system-aarch64\n\n- auto mount samba mounts\n\n- remove sshfs\n\n- decrease debug level", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "[2021/07/21 18:47:12.871531, 3] ../../lib/util/util_net.c:257(interpret_string_addr_internal)\r\n interpret_string_addr_internal: getaddrinfo failed for name suda-mbp.local (flags 1026) [nodename nor servname provided, or not known]\r\n[2021/07/21 18:47:12.871626, 3] ../../source3/lib/util_sock.c:1026(get_mydnsfullname)\r\n get_mydnsfullname: getaddrinfo failed for name suda-mbp.local [Unknown error]", + "/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/share/man/man8/mount_9p.8\r\n/Library/Developer/CommandLineTools/SDKs/MacOSX10.14.sdk/usr/share/man/man8/mount_9p.8\r\n/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/share/man/man8/mount_9p.8\r\n/Library/Developer/CommandLineTools/SDKs/MacOSX11.1.sdk/usr/share/man/man8/mount_9p.8\r\n/Library/Developer/CommandLineTools/SDKs/MacOSX11.3.sdk/usr/share/man/man8/mount_9p.8\r\n/sbin/mount_9p\r\n/usr/share/man/man8/mount_9p.8" + ] + } + }, + "metadata": { + "tags": [ + "lima", + "incubating", + "app-definition", + "impact-changelog" + ], + "category": "workloads", + "cncfProjects": [ + "lima" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/lima-vm/lima/pull/188", + "sourceRepo": "lima-vm/lima", + "reactions": 2, + "comments": 21 + }, + "security": { + "scannedAt": "2026-02-27T17:45:45.718Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/lima/lima-1913-support-for-inotify-in-mounted-directories.json b/solutions/cncf-generated/lima/lima-1913-support-for-inotify-in-mounted-directories.json new file mode 100644 index 00000000..54ee02ff --- 
/dev/null +++ b/solutions/cncf-generated/lima/lima-1913-support-for-inotify-in-mounted-directories.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:40.008Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "lima: Support for inotify in mounted directories", + "description": "Fixes #615 \n\n### Tasks\n- [x] Basic testing with vz (reverse-sshfs, virtiofs)\n- [x] Basic testing with qemu macOS (reverse-sshfs, 9p)\n- [ ] Basic testing with qemu linux (Need help!!)\n- [ ] Basic testing with wsl windows (Need help!!, Need to see if this is needed/wsl mount already supports this)\n- [x] Explore alternate for chmod\n\n### Approaches tried for triggering inotify in guest\n\n#### chtimes/touch (Currently used)\n- No recursion issue / vim events are also triggered\n- ~~But have issues with IDE (intellij keeps telling load file from system).~~ Fixed by passing through host time\n\n#### chmod\n- No recursion issue / vim events are also triggered\n- IDE don't have any problem\n- This deals with permissions of files in host (It would be better if we can find a alternate)\n\nDo suggest if there are someother approach for this.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@aeifn Yes yes i am working on optimising it and address these crashes on high load", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "I tried to test it on macOS 12.7.3 (Monterey) on Intel, and it didn't work for me at all, both using Alpine and Ubuntu (default), with either reverse-sshfs or 9p:", + "I then run `touch /tmp/lima/foo` on the host, and the file is visible inside the guest, but `inotifywait` doesn't report anything.\r\n\r\n`limactl start` confirms that `mountInotify` has been set:", + "> I tried to test it on macOS 12.7.3 (Monterey) on Intel,\r\n\r\nSame thing happens for me on macOS 
14.3.1 (Sonoma) on M1: I don't get any inotify events (inside the VM) for files touched on the host, but I do get events for files touched inside the VM.\r\n\r\nDo I misunderstand how this is supposed to work? I expected that touch a file on the host would cause the guestagent to touch the file inside the VM, triggering an inotify event that could be captured by applications supporting hot-reload.\r\n\r\nI guess I can give it one more try using VZ, but I suspect I must be doing something else wrong.\n> I guess I can give it one more try using VZ, but I suspect I must be doing something else wrong.\r\n\r\nSame results with VZ.\r\n\r\nOnly interesting observation was seeing these 2 lines during startup:" + ] + } + }, + "metadata": { + "tags": [ + "lima", + "incubating", + "app-definition", + "impact-changelog" + ], + "category": "workloads", + "cncfProjects": [ + "lima" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/lima-vm/lima/pull/1913", + "sourceRepo": "lima-vm/lima", + "reactions": 5, + "comments": 30 + }, + "security": { + "scannedAt": "2026-02-27T17:45:40.008Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/lima/lima-2151-cli-flag-to-generate-autostart-files.json b/solutions/cncf-generated/lima/lima-2151-cli-flag-to-generate-autostart-files.json new file mode 100644 index 00000000..4ad13fa3 --- /dev/null +++ b/solutions/cncf-generated/lima/lima-2151-cli-flag-to-generate-autostart-files.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:42.309Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "lima: CLI flag to generate autostart files", + "description": "Added a new public CLI command: `limactl start-at-login INSTANCE --enabled`.\nThis command facilitates the generation of unit files for `launchd/systemd`, providing users with a 
straightforward way to control `limactl` autostart behavior.\n\nSimplified Integration:\n\nAdjusted `launchd/systemd` integration to start the `limactl hostagent` directly, simplifying the launch process.\n\nFix #2142\n\nPartialy based on #2140", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "The code looks good, but the commit seems to contain irrelevant changes.\n`git rebase master` will probably fix the issue.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "lima", + "incubating", + "app-definition", + "area-cli" + ], + "category": "workloads", + "cncfProjects": [ + "lima" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/lima-vm/lima/pull/2151", + "sourceRepo": "lima-vm/lima", + "reactions": 3, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:45:42.309Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/lima/lima-2306-add-command-to-generate-jsonschema-for-limayaml.json b/solutions/cncf-generated/lima/lima-2306-add-command-to-generate-jsonschema-for-limayaml.json new file mode 100644 index 00000000..8fc0a3fe --- /dev/null +++ b/solutions/cncf-generated/lima/lima-2306-add-command-to-generate-jsonschema-for-limayaml.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:43.791Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "lima: Add command to generate jsonschema for limayaml", + "description": "Closes #2305 \n\nhttps://pypi.org/project/check-jsonschema/\n\nActually found two bugs, with the current code:\n\n```\nSchema validation errors were encountered.\n examples/default.yaml::$.vmType: None is not of type 'string'\n examples/default.yaml::$.os: None is not of type 'string'\n 
examples/default.yaml::$.arch: None is not of type 'string'\n examples/default.yaml::$.cpuType.armv7l: None is not of type 'string'\n examples/default.yaml::$.cpuType.aarch64: None is not of type 'string'\n examples/default.yaml::$.cpuType.x86_64: None is not of type 'string'\n examples/default.yaml::$.cpus: None is not of type 'integer'\n examples/default.yaml::$.memory: None is not of type 'string'\n examples/default.yaml::$.disk: None is not of type 'string'\n```\n```\nSchema validation errors were encountered.\n examples/docker.yaml::$.probes[0]: Additional properties are not allowed ('hint', 'script' were unexpected)\n examples/docker.yaml::$.probes[0]: 'Mode' is a required property\n examples/docker.yaml::$", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Avoid issues with converting null strings in a map value\n\nChange order to alphabetical, to match the \"limactl info\"\n\n----\n\nHelps with:\n\n* https://github.com/lima-vm/lima/pull/1069\n\n* https://github.com/lima-vm/lima/pull/2306", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Schema validation errors were encountered.\r\n examples/default.yaml::$.vmType: None is not of type 'string'\r\n examples/default.yaml::$.os: None is not of type 'string'\r\n examples/default.yaml::$.arch: None is not of type 'string'\r\n examples/default.yaml::$.cpuType.armv7l: None is not of type 'string'\r\n examples/default.yaml::$.cpuType.aarch64: None is not of type 'string'\r\n examples/default.yaml::$.cpuType.x86_64: None is not of type 'string'\r\n examples/default.yaml::$.cpus: None is not of type 'integer'\r\n examples/default.yaml::$.memory: None is not of type 'string'\r\n examples/default.yaml::$.disk: None is not of type 'string'", + "Schema validation errors were encountered.\r\n examples/docker.yaml::$.probes[0]: Additional properties are not allowed ('hint', 'script' were unexpected)\r\n 
examples/docker.yaml::$.probes[0]: 'Mode' is a required property\r\n examples/docker.yaml::$.probes[0]: 'Description' is a required property\r\n examples/docker.yaml::$.probes[0]: 'Script' is a required property\r\n examples/docker.yaml::$.probes[0]: 'Hint' is a required property", + "Schema validation errors were encountered.\r\n examples/default.yaml::$.additionalDisks: None is not of type 'array'\r\n examples/default.yaml::$.mounts[0].mountPoint: None is not of type 'string'\r\n examples/default.yaml::$.mounts[0].writable: None is not of type 'boolean'\r\n examples/default.yaml::$.mounts[0].sshfs.cache: None is not of type 'boolean'\r\n examples/default.yaml::$.mounts[0].sshfs.followSymlinks: None is not of type 'boolean'\r\n examples/default.yaml::$.mounts[0].sshfs.sftpDriver: None is not of type 'string'\r\n examples/default.yaml::$.mounts[0].9p.securityModel: None is not of type 'string'\r\n examples/default.yaml::$.mounts[0].9p.protocolVersion: None is not of type 'string'\r\n examples/default.yaml::$.mounts[0].9p.msize: None is not of type 'string'\r\n examples/default.yaml::$.mounts[0].9p.cache: None is not of type 'string'\r\n examples/default.yaml::$.mountType: None is not of type 'string'\r\n examples/default.yaml::$.mountInotify: None is not of type 'boolean'\r\n examples/default.yaml::$.ssh.loadDotSSHPubKeys: None is not of type 'boolean'\r\n examples/default.yaml::$.ssh.forwardAgent: None is not of type 'boolean'\r\n examples/default.yaml::$.ssh.forwardX11: None is not of type 'boolean'\r\n examples/default.yaml::$.ssh.forwardX11Trusted: None is not of type 'boolean'\r\n examples/default.yaml::$.firmware.legacyBIOS: None is not of type 'boolean'\r\n examples/default.yaml::$.audio.device: None is not of type 'string'\r\n examples/default.yaml::$.video.display: None is not of type 'string'\r\n examples/default.yaml::$.video.vnc.display: None is not of type 'string'\r\n examples/default.yaml::$.upgradePackages: None is not of type 'boolean'\r\n 
examples/default.yaml::$.containerd.system: None is not of type 'boolean'\r\n examples/default.yaml::$.containerd.user: None is not of type 'boolean'\r\n examples/default.yaml::$.guestInstallPrefix: None is not of type 'string'\r\n examples/default.yaml::$.networks: None is not of type 'array'\r\n examples/default.yaml::$.hostResolver.enabled: None is not of type 'boolean'\r\n examples/default.yaml::$.hostResolver.ipv6: None is not of type 'boolean'\r\n examples/default.yaml::$.hostResolver.hosts: None is not of type 'object'\r\n examples/default.yaml::$.propagateProxyEnv: None is not of type 'boolean'\r\n examples/default.yaml::$.caCerts.removeDefaults: None is not of type 'boolean'\r\n examples/default.yaml::$.caCerts.files: None is not of type 'array'\r\n examples/default.yaml::$.caCerts.certs: None is not of type 'array'\r\n examples/default.yaml::$.rosetta.enabled: None is not of type 'boolean'\r\n examples/default.yaml::$.rosetta.binfmt: None is not of type 'boolean'\r\n examples/default.yaml::$.plain: None is not of type 'boolean'\r\n examples/default.yaml::$.timezone: None is not of type 'string'" + ] + } + }, + "metadata": { + "tags": [ + "lima", + "incubating", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "lima" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/lima-vm/lima/pull/2306", + "sourceRepo": "lima-vm/lima", + "reactions": 3, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:45:43.791Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/lima/lima-2411-revamp-port-forwarding-to-support-udp.json b/solutions/cncf-generated/lima/lima-2411-revamp-port-forwarding-to-support-udp.json new file mode 100644 index 00000000..aaf338d0 --- /dev/null +++ b/solutions/cncf-generated/lima/lima-2411-revamp-port-forwarding-to-support-udp.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + 
"exportedAt": "2026-02-27T17:45:37.340Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "lima: Revamp port forwarding to support UDP", + "description": "Fixes #366 \nDiscussion - #2402 \n\nRevamps port forwarding to use existing GRPC communication.\n\n**Advantages**\n- No SSH daemon (See Notes below)\n- No Subprocess for SSH\n- Faster (See performance below)\n- Ease of support for newer protocol (We do support UDP as well with this implementation)\n\n**Todo**\n- [x] GRPC based tunnels\n- [x] TCP support\n- [x] UDP support\n- [x] Config to enable / disable this new port forwarding model\n- [x] Testing on MacOS\n- [ ] Testing on Linux\n- [ ] Testing on Windows\n\n**Performance**\nGRPC TCP - ~3.80 Gbits/sec\nGRPC TCP Reverse - ~4.77 Gbits/sec\nSSH TCP - ~3.38 Gbits/sec\nSSH TCP Reverse - ~3.08 Gbits/sec\n\n**Notes**\n- We will support only TCP and UDP via this approach. Unix and unixgram support socket to be added later as a separate PR\n- We support forwarding from host (Listen) -> guest (Dial). We don't support reverse forwarding host (Dial) -> guest (Listen) as of this PR. 
But can be extended and support it in a separate PR", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Converting to draft as it needs some more changes related to handling of ports on host side", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "services:\r\n iperf3-server:\r\n image: networkstatic/iperf3\r\n command: -s\r\n ports:\r\n - \"5201:5201/udp\"\r\n - \"5201:5201/tcp\"" + ] + } + }, + "metadata": { + "tags": [ + "lima", + "incubating", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "lima" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/lima-vm/lima/pull/2411", + "sourceRepo": "lima-vm/lima", + "reactions": 6, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:45:37.340Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/lima/lima-4137-support-for-libkrun-using-krunkit.json b/solutions/cncf-generated/lima/lima-4137-support-for-libkrun-using-krunkit.json new file mode 100644 index 00000000..f8924368 --- /dev/null +++ b/solutions/cncf-generated/lima/lima-4137-support-for-libkrun-using-krunkit.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:48.452Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "lima: Support for `libkrun` using `krunkit`", + "description": "This PR adds support for [libkrun](https://github.com/containers/libkrun) using their [krunkit](https://github.com/containers/krunkit) CLI, a dynamic library that enables programs to run processes in lightweight, isolated environments using KVM virtualization on Linux and HVF on macOS/ARM64. 
The key feature is to access GPU via Vulkan(Venus) inside the VM.\n\n### Implementation Details\n- **Networking:** Network connectivity is established using `gvisor-tap-vsock` for slirp functionality. Additional support for `userv2`, `shared`, and `bridged` networking modes is also included.\n\n- **File Sharing:** `virtiofs` is implemented for high-performance file sharing between the host and the guest VM.\n\n- **Guest-Host Agent Communication:** Host-to-guest agent communication currently relies on ssh. There exists a krunkit bug (see containers/krunkit#79) that fails to create the necessary Unix socket file for virtio-vsock communication.\n\n### GPU Acceleration (Venus) and OS Recommendation\nA primary us", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "> @lima-vm/maintainers @lima-vm/committers Any help is appreciated! What am I doing wrong here that an IP is not getting assigned for the `krunkit` VM using `socket_vmnet`?\n> \n> For `Qemu`:\n> \n> ```shell\n> # limactl start default --vm-type=qemu --network=lima:shared\n> ➜ lima2 git:(driver/krun) ✗ arp -an | grep bridge100\n> \n> ? (192.168.105.1) at 52:ed:3c:d4:1e:64 on bridge100 ifscope permanent [bridge]\n> ? (192.168.105.2) at 52:55:55:e0:48:67 on bridge100 ifscope [bridge] # VM IP\n> ? (224.0.0.251) at 1:0:5e:0:0:fb on bridge100 ifscope permanent [ethernet]\n> ```\n> \n> For `Krunkit`:\n> \n> ```shell\n> ➜ limactl start default --vm-type=krunkit --network=lima:shared\n> WARN[0000] Template locator \"template://_images/ubuntu\" should be written \"template:_images/ubuntu\" since Lima v2.0 \n> WARN[0000] Template locator \"template://_default/mounts\" should be written \"template:_default/mounts\" since Lima v2.0 \n> ? 
Creating an instance \"default\" Proceed with the current configuration\n> INFO[0001] Starting socket_vmnet daemon for \"shared\" network \n> INFO[0001] Starting the instance \"default\" with external VM driver \"krunkit\" \n> INFO[0001] Attempting to download the image arch=aarch64 digest=\"sha256:26d0ac2236f12954923eb35ddfee8fa9fff3eab6111ba84786b98ab3b972c6d8\" location=\"https://cloud-images.ubuntu.com/releases/plucky/release-20250701/ubuntu-25.04-server-cloudimg-arm64.img\"\n> INFO[0001] Using cache \"/Users/ansumansahoo/Library/Caches/lima/download/by-url-sha256/eccac025a7a4709", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "`Venus support inside VM:`", + "

\r\n
\r\n\r\n
Ubuntu boot\r\n

\r\n\r\n`Lima logs:`", + "`Venus support inside VM:`" + ] + } + }, + "metadata": { + "tags": [ + "lima", + "incubating", + "app-definition", + "impact-changelog", + "area-vmdrivers" + ], + "category": "workloads", + "cncfProjects": [ + "lima" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/lima-vm/lima/pull/4137", + "sourceRepo": "lima-vm/lima", + "reactions": 1, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:45:48.453Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/lima/lima-4595-support-macos-guests.json b/solutions/cncf-generated/lima/lima-4595-support-macos-guests.json new file mode 100644 index 00000000..363ae736 --- /dev/null +++ b/solutions/cncf-generated/lima/lima-4595-support-macos-guests.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:41.012Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "lima: Support macOS guests", + "description": "Usage:\n```\nlimactl create --video template:macos\nlimactl start macos\nlimactl shell macos\n```\n\nThe password prompt is shown during creating an instance, so as to run `chown root:wheel ~/.lima/_mnt/0/Library/LaunchDaemons/...`, which is required for the `lima-macos-init` launch daemon to run.\n\nThe password for GUI login is randomly generated and stored in `/Users/${USER}.guest/password` in the VM.\n\nFix #3618\n\nTODOs (in follow-up PRs):\n- [x] Docs\n- [x] Switch back the default shell to `zsh`\n- [ ] Graceful `limactl stop` (currently it has to be stopped from the guest) https://github.com/lima-vm/lima/issues/4610\n- [ ] Propagate additional cloud-init data\n - [x] Hostname\n - [x] Timezone\n - [x] DNS\n - [ ] CA Certificates https://github.com/lima-vm/lima/issues/4611\n- [ ] Support non-plain mode\n - [ ] mounts https://github.com/lima-vm/lima/issues/4612\n - [ ] 
port forwards https://github.com/lima-vm/lima/issues/4613\n- [ ] ~Let `limactl sudoers` generate the entry for chowning files\n (`ch", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "> On second thought, is the `nil` check necessary?\n\nI've wondered before why we use pointers for so many of the fields in `LimaYAML`.\n\nFor some, especially booleans, we want to be able to distinguish if the value was specified as the null value (i.e. `false`, `0`, `\"\"`), or not provided at all. But for many values the null value is not a valid choice, so e.g. the OS could just be `OS` instead of `*OS`.\n\nOr am I missing something?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "limactl create --video template:macos\r\nlimactl start macos\r\nlimactl shell macos", + "base:\r\n - internal:override\r\n - internal:user\r\n - internal:default\r\n - internal:builtin" + ] + } + }, + "metadata": { + "tags": [ + "lima", + "incubating", + "app-definition", + "impact-changelog", + "guest-macos" + ], + "category": "workloads", + "cncfProjects": [ + "lima" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/lima-vm/lima/pull/4595", + "sourceRepo": "lima-vm/lima", + "reactions": 5, + "comments": 16 + }, + "security": { + "scannedAt": "2026-02-27T17:45:41.012Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/linkerd/linkerd-13615-expose-issuer-certificate-ttl-as-a-prometheus-metric.json b/solutions/cncf-generated/linkerd/linkerd-13615-expose-issuer-certificate-ttl-as-a-prometheus-metric.json new file mode 100644 index 00000000..4ec09598 --- /dev/null +++ b/solutions/cncf-generated/linkerd/linkerd-13615-expose-issuer-certificate-ttl-as-a-prometheus-metric.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:40.677Z", + 
"exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "linkerd: expose issuer certificate TTL as a prometheus metric", + "description": "Problem: There is currently no simple way to monitor the expiration time of the issuer certificate in use by linkerd; a surprising omission considering that issuer cert expiration will almost certainly cause visible cluster issues.\n\nSolution: \n\n- When a new issuer certificate is loaded, log its NotAfter time in unix epoch format, along with the current process wall clock time. The two timestamps are passed in via the logrus Fields pattern, allowing operators to easily pull these numbers from pod logs.\n- Register a prometheus gauge function metric to expose the TTL for monitoring\n\nFixes: https://github.com/linkerd/linkerd2/issues/11215", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "cc: @whickman :)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Subject\r\n\r\nProblem\r\n\r\nSolution\r\n\r\nValidation\r\n\r\nFixes #[GitHub issue ID]\r\n\r\nDCO Sign off", + "Introduce Pull Request Template\r\n\r\nGitHub's community guidelines recommend a pull request template, the repo was\r\nlacking one.\r\n\r\nIntroduce a `PULL_REQUEST_TEMPLATE.md` file.\r\n\r\nOnce merged, the\r\n[Community profile checklist](https://github.com/linkerd/linkerd2/community)\r\nshould indicate the repo now provides a pull request template.\r\n\r\nFixes #3321\r\n\r\nSigned-off-by: Jane Smith ", + "=== Skipped\r\n=== SKIP: viz/cmd TestRequestTapByResourceFromAPI/Should_return_error_if_stream_returned_error (0.00s)\r\n --- SKIP: TestRequestTapByResourceFromAPI/Should_return_error_if_stream_returned_error (0.00s)\r\n\r\nDONE 1063 tests, 1 skipped in 94.401s" + ] + } + }, + "metadata": { + "tags": [ + "linkerd", + "graduated", + "networking" + ], + "category": "networking", + "cncfProjects": 
[ + "linkerd" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/linkerd/linkerd2/pull/13615", + "sourceRepo": "linkerd/linkerd2", + "reactions": 5, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:44:40.677Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/linkerd/linkerd-2893-added-anti-affinity-when-ha-is-configured.json b/solutions/cncf-generated/linkerd/linkerd-2893-added-anti-affinity-when-ha-is-configured.json new file mode 100644 index 00000000..5fa91e23 --- /dev/null +++ b/solutions/cncf-generated/linkerd/linkerd-2893-added-anti-affinity-when-ha-is-configured.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:43.572Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "linkerd: Added Anti Affinity when HA is configured", + "description": "Fixes #1895 \n\nThe following PR adds anti-affinity rules to `proxy-injector`, `sp-validator`, `linkerd-controller`, `tap` deployments.\n\nThe idea was to make anti-affinity rules both based on `kubernetes.io/hostname` and `failure-domain.beta.kubernetes.io/zone` **preferred** when only the the `--ha` flag is configured.\n\nif the `--required-host-anti-affinity` is also configured along with `--ha`, then the `kubernetes.io/hostname` is **required** while `failure-domain.beta.kubernetes.io/zone` is still **preferred**.\n\n@ihcsim @alpeb @grampelberg @olix0r", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Thankx @Pothulapati !", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "linkerd-existence\r\n-----------------\r\n√ 'linkerd-config' config map exists\r\n√ control plane replica sets are ready\r\n× no unschedulable pods\r\n 
linkerd-controller-7d9bdd85b8-2sk7q: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-74vsr: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-97lds: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-9lj9v: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-9sx8d: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-kbzwn: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-lcgww: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-zmsn4: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-proxy-injector-76c4f5c7d9-98gm8: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-proxy-injector-76c4f5c7d9-w7jwc: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-sp-validator-6bc6cc666b-4k5zs: 0/6 nodes are 
available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-sp-validator-6bc6cc666b-qvkdb: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-tap-8688fdf4f-c5tgb: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-tap-8688fdf4f-wmlnz: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n see https://linkerd.io/checks/#l5d-existence-unschedulable-pods for hints", + "✗ k get no -L failure-domain.beta.kubernetes.io/zone\r\nNAME STATUS ROLES AGE VERSION ZONE\r\ngke-isim-dev-ha-default-pool-4b003e42-1m9b Ready 22h v1.13.7-gke.8 us-east1-b\r\ngke-isim-dev-ha-default-pool-4b003e42-p1ps Ready 22h v1.13.7-gke.8 us-east1-b\r\ngke-isim-dev-ha-default-pool-560160bf-7sl6 Ready 20h v1.13.7-gke.8 us-east1-c\r\ngke-isim-dev-ha-default-pool-560160bf-cnzh Ready 22h v1.13.7-gke.8 us-east1-c\r\ngke-isim-dev-ha-default-pool-f4da19f4-4n3c Ready 22h v1.13.7-gke.8 us-east1-d\r\ngke-isim-dev-ha-default-pool-f4da19f4-g79p Ready 22h v1.13.7-gke.8 us-east1-d\r\n\r\n✗ k -n linkerd get po -owide\r\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES\r\nlinkerd-controller-5b5765b845-7cnpz 3/3 Running 0 18m 10.60.1.27 gke-isim-dev-ha-default-pool-4b003e42-p1ps # us-east1-b\r\nlinkerd-controller-5b5765b845-j6ss9 3/3 Running 0 18m 10.60.3.20 gke-isim-dev-ha-default-pool-560160bf-cnzh # us-east1-c\r\nlinkerd-controller-5b5765b845-r92md 3/3 Running 0 18m 10.60.4.23 gke-isim-dev-ha-default-pool-f4da19f4-4n3c # us-east1-d\r\nlinkerd-grafana-7df55df848-888ck 2/2 Running 0 12m 10.60.1.30 gke-isim-dev-ha-default-pool-4b003e42-p1ps \r\nlinkerd-identity-74cf6f446f-7r57m 2/2 Running 0 18m 10.60.4.22 gke-isim-dev-ha-default-pool-f4da19f4-4n3c # 
us-east1-d\r\nlinkerd-identity-74cf6f446f-b9gfc 2/2 Running 0 18m 10.60.1.26 gke-isim-dev-ha-default-pool-4b003e42-p1ps #us-east1-b \r\nlinkerd-identity-74cf6f446f-fbw5w 2/2 Running 0 18m 10.60.2.20 gke-isim-dev-ha-default-pool-560160bf-7sl6 # us-east1-c\r\nlinkerd-prometheus-7bcc6c5b66-rv7n4 2/2 Running 3 18m 10.60.0.17 gke-isim-dev-ha-default-pool-4b003e42-1m9b \r\nlinkerd-proxy-injector-746bfbb494-8w8qk 2/2 Running 0 18m 10.60.3.21 gke-isim-dev-ha-default-pool-560160bf-cnzh # us-east1-c\r\nlinkerd-proxy-injector-746bfbb494-s42qm 2/2 Running 0 18m 10.60.4.24 gke-isim-dev-ha-default-pool-f4da19f4-4n3c # us-east1-d\r\nlinkerd-proxy-injector-746bfbb494-wlxxf 2/2 Running 0 18m 10.60.1.28 gke-isim-dev-ha-default-pool-4b003e42-p1ps # us-east1-b\r\nlinkerd-sp-validator-6947dff89c-4nqrs 2/2 Running 0 18m 10.60.0.18 gke-isim-dev-ha-default-pool-4b003e42-1m9b # us-east1-b\r\nlinkerd-sp-validator-6947dff89c-64vv9 2/2 Running 0 18m 10.60.2.23 gke-isim-dev-ha-default-pool-560160bf-7sl6 # us-east1-c\r\nlinkerd-sp-validator-6947dff89c-cdd4q 2/2 Running 0 18m 10.60.5.20 gke-isim-dev-ha-default-pool-f4da19f4-g79p # us-east1-d\r\nlinkerd-tap-5d7745b8c8-n9xwj 2/2 Running 0 18m 10.60.5.21 gke-isim-dev-ha-default-pool-f4da19f4-g79p # us-east1-d\r\nlinkerd-tap-5d7745b8c8-q9rtx 2/2 Running 0 18m 10.60.3.22 gke-isim-dev-ha-default-pool-560160bf-cnzh # us-east1-c\r\nlinkerd-tap-5d7745b8c8-rjznc 2/2 Running 0 18m 10.60.1.29 gke-isim-dev-ha-default-pool-4b003e42-p1ps # us-east1-b\r\nlinkerd-web-7cd4bf9d7-cd4bv 2/2 Running 0 18m 10.60.2.21 gke-isim-dev-ha-default-pool-560160bf-7sl6 ", + "✗ k -n linkerd scale deploy/linkerd-controller --replicas=10\r\ndeployment.extensions/linkerd-controller scaled\r\n✗ linkerd check\r\nlinkerd-existence\r\n-----------------\r\n√ 'linkerd-config' config map exists\r\n√ control plane replica sets are ready\r\n× no unschedulable pods\r\n linkerd-controller-5b5765b845-9nq7d: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) 
didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-5b5765b845-ncztn: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-5b5765b845-tgdlp: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-5b5765b845-xshbc: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n see https://linkerd.io/checks/#l5d-existence-unschedulable-pods for hints" + ] + } + }, + "metadata": { + "tags": [ + "linkerd", + "graduated", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "linkerd" + ], + "targetResourceKinds": [ + "Deployment" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/linkerd/linkerd2/pull/2893", + "sourceRepo": "linkerd/linkerd2", + "reactions": 2, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:44:43.572Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/linkerd/linkerd-3470-the-linkerd-proxy-does-not-work-with-headless-services.json b/solutions/cncf-generated/linkerd/linkerd-3470-the-linkerd-proxy-does-not-work-with-headless-services.json new file mode 100644 index 00000000..875a36a4 --- /dev/null +++ b/solutions/cncf-generated/linkerd/linkerd-3470-the-linkerd-proxy-does-not-work-with-headless-services.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:44.580Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "linkerd: The linkerd proxy does not work with headless services", + "description": "The linkerd proxy does not work with headless services (i.e. 
endpoints not referencing a pod).\n\nChanged endpoints_watcher to also return endpoints with no targetref. Changed endpoint_translator to handle addresses with no associated pod.\n\nFixes #3308\n\nRan tests in minikube verifying that the proxy handles headless services correctly both in cases with and without port-remapping.\n\nSigned-of-by: Johannes Hansen ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Thanks @JohannesEH. We really appreciate you taking the time to submit a fix and for providing helpful context about the change!", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "panic: runtime error: invalid memory address or nil pointer dereference\r\n[signal SIGSEGV: segmentation violation code=0x1 addr=0xb0 pc=0x13feefa]\r\n\r\ngoroutine 288 [running]:\r\ngithub.com/linkerd/linkerd2/controller/api/destination.(*endpointTranslator).toWeightedAddr(0xc000969630, 0xc000427300, 0xc, 0x50, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0004d1f40, ...)\r\n\t/linkerd-build/controller/api/destination/endpoint_translator.go:123 +0x3a\r\ngithub.com/linkerd/linkerd2/controller/api/destination.(*endpointTranslator).Add(0xc000969630, 0xc00049c5d0)\r\n\t/linkerd-build/controller/api/destination/endpoint_translator.go:50 +0x1c5\r\ngithub.com/linkerd/linkerd2/controller/api/destination/watcher.(*portPublisher).subscribe(0xc0004ae340, 0x1b1d100, 0xc000969630)\r\n\t/linkerd-build/controller/api/destination/watcher/endpoints_watcher.go:501 +0x68\r\ngithub.com/linkerd/linkerd2/controller/api/destination/watcher.(*servicePublisher).subscribe(0xc000422480, 0xc000000050, 0x0, 0x0, 0x1b1d100, 0xc000969630)\r\n\t/linkerd-build/controller/api/destination/watcher/endpoints_watcher.go:322 +0xda\r\ngithub.com/linkerd/linkerd2/controller/api/destination/watcher.(*EndpointsWatcher).Subscribe(0xc0004064e0, 0xc0005806d2, 0x5, 0xc0005806c0, 0x11, 0xc000000050, 0x0, 0x0, 0x1b1d100, 
0xc000969630, ...)\r\n\t/linkerd-build/controller/api/destination/watcher/endpoints_watcher.go:158 +0x290\r\ngithub.com/linkerd/linkerd2/controller/api/destination.(*server).Get(0xc0003a8a20, 0xc0009694f0, 0x1b4a800, 0xc0005916d0, 0x0, 0x0)\r\n\t/linkerd-build/controller/api/destination/server.go:123 +0x70e\r\ngithub.com/linkerd/linkerd2-proxy-api/go/destination._Destination_Get_Handler(0x16aaf00, 0xc0003a8a20, 0x1b42280, 0xc0006a4ec0, 0xc0009694a0, 0x20)\r\n\t/go/pkg/mod/github.com/linkerd/linkerd2-proxy-api@v0.1.9/go/destination/destination.pb.go:1823 +0x109\r\ngithub.com/grpc-ecosystem/go-grpc-prometheus.StreamServerInterceptor(0x16aaf00, 0xc0003a8a20, 0x1b424c0, 0xc0002de3c0, 0xc0006a4d60, 0x192c7d0, 0x1b30440, 0xc000329c20)\r\n\t/go/pkg/mod/github.com/grpc-ecosystem/go-grpc-prometheus@v0.0.0-20160910222444-6b7015e65d36/server.go:40 +0xe3\r\ngoogle.golang.org/grpc.(*Server).processStreamingRPC(0xc0000ad980, 0x1b55a20, 0xc000766480, 0xc0004aa900, 0xc0004069c0, 0x294db00, 0x0, 0x0, 0x0)\r\n\t/go/pkg/mod/google.golang.org/grpc@v1.22.0/server.go:1209 +0x462\r\ngoogle.golang.org/grpc.(*Server).handleStream(0xc0000ad980, 0x1b55a20, 0xc000766480, 0xc0004aa900, 0x0)\r\n\t/go/pkg/mod/google.golang.org/grpc@v1.22.0/server.go:1282 +0xd3f\r\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.1(0xc000322710, 0xc0000ad980, 0x1b55a20, 0xc000766480, 0xc0004aa900)\r\n\t/go/pkg/mod/google.golang.org/grpc@v1.22.0/server.go:717 +0x9f\r\ncreated by google.golang.org/grpc.(*Server).serveStreams.func1\r\n\t/go/pkg/mod/google.golang.org/grpc@v1.22.0/server.go:715 +0xa1", + "YAML I used for testing:", + "Using a `curl` container:" + ] + } + }, + "metadata": { + "tags": [ + "linkerd", + "graduated", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "linkerd" + ], + "targetResourceKinds": [ + "Pod", + "Service" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/linkerd/linkerd2/pull/3470", + "sourceRepo": "linkerd/linkerd2", + "reactions": 2, + 
"comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:44:44.580Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/linkerd/linkerd-3921-linkerd-cli-chocolatey-package.json b/solutions/cncf-generated/linkerd/linkerd-3921-linkerd-cli-chocolatey-package.json new file mode 100644 index 00000000..24d212c0 --- /dev/null +++ b/solutions/cncf-generated/linkerd/linkerd-3921-linkerd-cli-chocolatey-package.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:41.605Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "linkerd: Linkerd CLI Chocolatey Package", + "description": "This draft PR aims to fix #3063 by building a chocolatey package for linkerd2's Windows CLI\nThe package currently exists in the folder `windowspkg`\n\nTo build (on windows system):\n- Install chocolatey\n- cd into `windowspkg/linkerd2-cli`\n- Run `choco pack`\n- Run `choco install linkerd2-cli -s . ` , make sure cmd is running as administrator\n- Run `refreshenv` to add linkerd2-cli path to system\n- To unistall run `choco uninstall linkerd2-cli`\n\nThe following is further work to be done:\n- Fill `linkerd2-cli.nuspec` appropriately\n- Fill `LICENSE.txt` and `VERIFICATION.txt` appropriately\n\nFuture work:\n- After approval future commits will add auto update feature for the package\n\nSigned-off-by: Animesh Narayan Dangwal ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Hi @drholmie! I'm not familiar with Chocolatey or windows package management in general so I'm hoping you can help answer some fairly basic questions.\n\nIs the goal here simply to help with installation of the Linkerd CLI on windows (as opposed to solving distribution of the package)? In other words, the workflow would be to clone this repository and then run the above Chocolatey commands to install the CLI? 
\n\nPushing a Linkerd package to some online repository is out of scope here, I assume?\n\nIt looks like this package works by simply downloading the Linkerd CLI binary from the github releases page. This is a bit unintuitive for me because if I'm installing a tool from within a git repository, my default expectation is that it would be built from the current source. Is this standard practice for Chocolatey packages?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "The use of .nupkg or .nuspec in for package name or source is known to cause issues. Please use the package id from the nuspec `` with `-s .` (for local folder where nupkg is found).\r\nChocolatey v0.10.15\r\nInstalling the following packages:\r\nlinkerd.2.6.1.nupkg\r\nBy installing you accept licenses for the packages.\r\n\r\nlinkerd v2.6.1\r\nlinkerd package files install completed. Performing other installation steps.\r\n The install of linkerd was successful.\r\n Software install location not explicitly set, could be in package or\r\n default install location if installer.\r\n\r\nChocolatey installed 1/1 packages.\r\n See the log for details (C:\\ProgramData\\chocolatey\\logs\\chocolatey.log).\r\n\r\nDid you know the proceeds of Pro (and some proceeds from other\r\n licensed editions) go into bettering the community infrastructure?\r\n Your support ensures an active community, keeps Chocolatey tip top,\r\n plus it nets you some awesome features!\r\n https://chocolatey.org/compare", + "2020-03-11 11:41:17,680 16656 [DEBUG] - Capturing package files in 'C:\\ProgramData\\chocolatey\\lib\\kubernetes-helm'\r\n2020-03-11 11:41:17,684 16656 [DEBUG] - Found 'C:\\ProgramData\\chocolatey\\lib\\kubernetes-helm\\helm-v3.1.1-windows-amd64.zip.txt'\r\n with checksum '43EDDB5E1207ACA9AD67A244B00F3E3F'\r\n2020-03-11 11:41:17,685 16656 [DEBUG] - Found 'C:\\ProgramData\\chocolatey\\lib\\kubernetes-helm\\kubernetes-helm.nupkg'\r\n 
with checksum '007ECBDD3B5B2C7C4B67D35248E7E76A'\r\n2020-03-11 11:41:17,686 16656 [DEBUG] - Found 'C:\\ProgramData\\chocolatey\\lib\\kubernetes-helm\\kubernetes-helm.nuspec'\r\n with checksum '92B8494DF71659253F359C7FF5DB30F2'\r\n2020-03-11 11:41:17,687 16656 [DEBUG] - Found 'C:\\ProgramData\\chocolatey\\lib\\kubernetes-helm\\tools\\chocolateyInstall.ps1'\r\n with checksum '8F5A0D1F826C8FE6DDCBB7762384758E'\r\n2020-03-11 11:41:18,629 16656 [DEBUG] - Found 'C:\\ProgramData\\chocolatey\\lib\\kubernetes-helm\\tools\\windows-amd64\\helm.exe'\r\n with checksum 'ACE06BEF62D3F7ED0D6BD077E0BBA181'\r\n2020-03-11 11:41:18,632 16656 [DEBUG] - Found 'C:\\ProgramData\\chocolatey\\lib\\kubernetes-helm\\tools\\windows-amd64\\LICENSE'\r\n with checksum '6A18660411AF65E17F370BDBB50E6957'\r\n2020-03-11 11:41:18,633 16656 [DEBUG] - Found 'C:\\ProgramData\\chocolatey\\lib\\kubernetes-helm\\tools\\windows-amd64\\README.md'\r\n with checksum 'D19035E20DAA65E3E2B204A5D9AB6B71'\r\n2020-03-11 11:41:18,651 16656 [DEBUG] - Calling command ['\"C:\\ProgramData\\chocolatey\\tools\\shimgen.exe\" --path=\"..\\\\lib\\kubernetes-helm\\tools\\windows-amd64\\helm.exe\" --output=\"C:\\ProgramData\\chocolatey\\bin\\helm.exe\" --iconpath=\"C:\\ProgramData\\chocolatey\\lib\\kubernetes-helm\\tools\\windows-amd64\\helm.exe\"']\r\n2020-03-11 11:41:19,246 16656 [DEBUG] - [ShimGen] [WARN ] Could not extract icon from associated program. Using default. Error:\r\n2020-03-11 11:41:19,248 16656 [DEBUG] - [ShimGen] Selected Icon is invalid\r\n2020-03-11 11:41:19,460 16656 [DEBUG] - [ShimGen] Microsoft (R) Visual C# Compiler version 4.8.3752.0\r\n2020-03-11 11:41:19,462 16656 [DEBUG] - [ShimGen] for C# 5\r\n2020-03-11 11:41:19,463 16656 [DEBUG] - [ShimGen] Copyright (C) Microsoft Corporation. 
All rights reserved.\r\n2020-03-11 11:41:19,464 16656 [DEBUG] - [ShimGen] This compiler is provided as part of the Microsoft (R) .NET Framework, but only supports language versions up to C# 5, which is no longer the latest version. For compilers that support newer versions of the C# programming language, see http://go.microsoft.com/fwlink/?LinkID=533240\r\n2020-03-11 11:41:19,466 16656 [DEBUG] - [ShimGen] ShimGen has successfully created 'C:\\ProgramData\\chocolatey\\bin\\helm.exe'\r\n2020-03-11 11:41:19,484 16656 [DEBUG] - Command ['\"C:\\ProgramData\\chocolatey\\tools\\shimgen.exe\" --path=\"..\\\\lib\\kubernetes-helm\\tools\\windows-amd64\\helm.exe\" --output=\"C:\\ProgramData\\chocolatey\\bin\\helm.exe\" --iconpath=\"C:\\ProgramData\\chocolatey\\lib\\kubernetes-helm\\tools\\windows-amd64\\helm.exe\"'] exited with '0'\r\n2020-03-11 11:41:19,486 16656 [INFO ] - ShimGen has successfully created a shim for helm.exe\r\n2020-03-11 11:41:19,488 16656 [DEBUG] - Created: C:\\ProgramData\\chocolatey\\bin\\helm.exe\r\n Targeting: C:\\ProgramData\\chocolatey\\lib\\kubernetes-helm\\tools\\windows-amd64\\helm.exe\r\n IsGui:False", + "2020-03-11 11:49:08,807 16552 [DEBUG] - Capturing package files in 'C:\\ProgramData\\chocolatey\\lib\\linkerd'\r\n2020-03-11 11:49:08,810 16552 [DEBUG] - Found 'C:\\ProgramData\\chocolatey\\lib\\linkerd\\linkerd.nupkg'\r\n with checksum 'E08D51FD0E9C3E70C3914620A6700E87'\r\n2020-03-11 11:49:08,811 16552 [DEBUG] - Found 'C:\\ProgramData\\chocolatey\\lib\\linkerd\\linkerd.nuspec'\r\n with checksum 'BA376888E0C78AF84ADDA2870DA4052D'" + ] + } + }, + "metadata": { + "tags": [ + "linkerd", + "graduated", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "linkerd" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/linkerd/linkerd2/pull/3921", + "sourceRepo": "linkerd/linkerd2", + "reactions": 4, + "comments": 27 + }, + "security": { + "scannedAt": "2026-02-27T17:44:41.605Z", + 
"scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/linkerd/linkerd-5412-ignore-pods-with-status-phase-succeeded-when-watching-ip-addresses.json b/solutions/cncf-generated/linkerd/linkerd-5412-ignore-pods-with-status-phase-succeeded-when-watching-ip-addresses.json new file mode 100644 index 00000000..f20a4402 --- /dev/null +++ b/solutions/cncf-generated/linkerd/linkerd-5412-ignore-pods-with-status-phase-succeeded-when-watching-ip-addresses.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:39.715Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "linkerd: Ignore pods with status.phase=Succeeded when watching IP addresses", + "description": "Ignore pods with status.phase=Succeeded when watching IP addresses\n\nWhen a pod terminates successfully, some CNIs will assign its IP address\nto newly created pods. This can lead to duplicate pod IPs in the same\nKubernetes cluster.\n\nFilter out pods which are in a Succeeded phase since they are not \nroutable anymore.\n\nFixes #5394", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This looks great to me. @kforsthoevel are you able to test this branch and confirm if it fixes #5394 for you?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "❯ env GITHUB_TOKEN='..' 
bin/install-pr --k3d 5412\r\n..\r\nINFO[0004] DONE \r\n/home/kevin/Projects/linkerd/linkerd2\r\n\r\nLinkerd CLI available:\r\n/home/kevin/Projects/linkerd/linkerd2/target/release/linkerd2-cli-git-65f0d802-linux-amd64", + "./target/release/linkerd2-cli-git-65f0d802-linux-amd64 install |kubectl apply -f -" + ] + } + }, + "metadata": { + "tags": [ + "linkerd", + "graduated", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "linkerd" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/linkerd/linkerd2/pull/5412", + "sourceRepo": "linkerd/linkerd2", + "reactions": 6, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:44:39.715Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/linkerd/linkerd-5757-rabbitmq-integration-tests.json b/solutions/cncf-generated/linkerd/linkerd-5757-rabbitmq-integration-tests.json new file mode 100644 index 00000000..d85a3d17 --- /dev/null +++ b/solutions/cncf-generated/linkerd/linkerd-5757-rabbitmq-integration-tests.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:42.564Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "linkerd: Rabbitmq integration tests.", + "description": "Subject\n Add integration tests for external resources\nProblem\n linkerd changes sometimes causes regressions on external components that users have installed in their stacks such as rabbitmq. 
\nSolution\nNew integration test, adds functionality to install additional components and run basic test on them to make sure linkerd changes do not have an adverse effect\n This change will include the following steps\n Deploy a rabbitmq server\n Deploy a rabbitmq client, the code for that client is hosted at https://github.com/barkardk/integration\n Use a golden file for output verification\n Uses the test suite to inject linkerd into the deployments \n Client then creates a queue, a message and consumes said message and sends a log output.\n The linkerd testutil compares the output from the client with a golden file\n\nFixes #5605\n\nSigned-off-by: Kristin Barkardottir ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@barkardk If you merge in the latest `main`, it should pick up the fix causing CI to fail right now.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "linkerd", + "graduated", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "linkerd" + ], + "targetResourceKinds": [ + "Deployment" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/linkerd/linkerd2/pull/5757", + "sourceRepo": "linkerd/linkerd2", + "reactions": 3, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:44:42.564Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/litmus/litmus-4538-show-login-with-sso-button-when-dex-is-enabled.json b/solutions/cncf-generated/litmus/litmus-4538-show-login-with-sso-button-when-dex-is-enabled.json new file mode 100644 index 00000000..d49f5ae9 --- /dev/null +++ b/solutions/cncf-generated/litmus/litmus-4538-show-login-with-sso-button-when-dex-is-enabled.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:50.796Z", + "exportedBy": "cncf-mission-generator", + 
"consoleVersion": "auto-generated", + "mission": { + "title": "litmus: Show `Login with SSO` button when Dex is enabled", + "description": "## Proposed changes\n\nFixes #4236 \n\n![Screenshot 2024-03-17 at 17 03 30](https://github.com/litmuschaos/litmus/assets/10204970/bb9d66a1-d459-4796-ba45-4707fb5edd5d)\n\n## Types of changes\n\nWhat types of changes does your code introduce to Litmus? Put an `x` in the boxes that apply\n- [x] New feature (non-breaking change which adds functionality)\n- [ ] Bugfix (non-breaking change which fixes an issue)\n- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)\n- [ ] Documentation Update (if none of the other choices applies)\n\n## Checklist\n\nPut an `x` in the boxes that apply. You can also fill these out after creating the PR. If you're unsure about any of them, don't hesitate to ask. We're here to help! This is simply a reminder of what we are going to look for before merging your code.\n- [x] I have read the [CONTRIBUTING](https://github.com/litmuschaos/litmus/blob/master/CONTRIBUTING.md) doc\n- [x] I have signed the commit for DCO to be passed.\n- [x", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@DarthBenro008 Can you also review this PR?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "litmus", + "incubating", + "orchestration" + ], + "category": "troubleshooting", + "cncfProjects": [ + "litmus" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/litmuschaos/litmus/pull/4538", + "sourceRepo": "litmuschaos/litmus", + "reactions": 15, + "comments": 3 + }, + "security": { + "scannedAt": "2026-02-27T17:45:50.797Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git 
a/solutions/cncf-generated/meshery/meshery-11100-docs-deploying-wordpress-and-mysql-with-persistent-volumes-with-me.json b/solutions/cncf-generated/meshery/meshery-11100-docs-deploying-wordpress-and-mysql-with-persistent-volumes-with-me.json new file mode 100644 index 00000000..1f57e028 --- /dev/null +++ b/solutions/cncf-generated/meshery/meshery-11100-docs-deploying-wordpress-and-mysql-with-persistent-volumes-with-me.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:02.480Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "meshery: [Docs] Deploying WordPress And MySql With Persistent Volumes with Meshery tutorial", + "description": "**Notes for Reviewers**\n\nThis PR fixes #11068 \n\n**[Signed commits](https://github.com/meshery/meshery/blob/master/CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**\n- [x] Yes, I signed my commits.", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "🚀 Preview for commit d852ff7d6c4587177afaeb3952ec510b028103ce at: https://665db503003792603e0f3694--meshery-docs-preview.netlify.app", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "meshery", + "sandbox", + "networking", + "area-docs" + ], + "category": "networking", + "cncfProjects": [ + "meshery" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/meshery/meshery/pull/11100", + "sourceRepo": "meshery/meshery", + "reactions": 2, + "comments": 16 + }, + "security": { + "scannedAt": "2026-02-27T17:49:02.480Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/meshery/meshery-2677-versioning-poc.json b/solutions/cncf-generated/meshery/meshery-2677-versioning-poc.json new file mode 100644 index 00000000..5d29d7a9 --- 
/dev/null +++ b/solutions/cncf-generated/meshery/meshery-2677-versioning-poc.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:01.481Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "meshery: Versioning poc", + "description": "**Description**\nA PoC about versioning the Docs Site.\nI have a index.html to parse the last most recent version registered, in the _data/versions.yml file (made a little template there).\nAnd then I included the dropdown version selector from the left sidebar.\nIt would imply that every version to be published here has a specific DOCS files and content. For everyone of these, permalinks would be updated.\nAnd for every new version, the process would be:\n1. Create a folder in docs/versions/vX.x.m \n 1. (v= version, X= ver Number, x=major change, m=min change)\n2. Add the version data in the docs/_data/versions.yml (following template)\n3. Edit the files for the version documentation\n4. Update the permalink in FRONT MATTER of each file\n5. Done\n\nThis PR fixes #1304 \n\n**Notes for Reviewers**\nPlease check this out @chandrashritii, @jesuslerma, @anirudhjain75, @leecalcote \nThanks in advance.\n\n**[Signed commits](https://github.com/layer5io/meshery/blob/master/CONTRIBUTING.md#signing-off-on-comm", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Fancy! 
😄 \n\"Screen", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "meshery", + "sandbox", + "networking", + "issue-stale", + "area-docs" + ], + "category": "networking", + "cncfProjects": [ + "meshery" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/meshery/meshery/pull/2677", + "sourceRepo": "meshery/meshery", + "reactions": 2, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:49:01.481Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/meshery/meshery-2744-add-patterns-list-subcommand.json b/solutions/cncf-generated/meshery/meshery-2744-add-patterns-list-subcommand.json new file mode 100644 index 00000000..6a5c32a2 --- /dev/null +++ b/solutions/cncf-generated/meshery/meshery-2744-add-patterns-list-subcommand.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:00.410Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "meshery: Add patterns list subcommand ", + "description": "**Description**\nFixed a few issues raised \n- signed commits \n- rename `--all` to `--verbose`\n- fixed linting errors\nThis PR fixes #2623\n\n**Notes for Reviewers**\n\n**[Signed commits](https://github.com/layer5io/meshery/blob/master/CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**\n- [x] Yes, I signed my commits.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@s1ntaxe770r will you sync your fork so that the other commits are not included in this PR? 
You'll find instructions here - https://github.com/layer5io/meshery/blob/master/CONTRIBUTING-gitflow.md.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "meshery", + "sandbox", + "networking", + "component-mesheryctl" + ], + "category": "networking", + "cncfProjects": [ + "meshery" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/meshery/meshery/pull/2744", + "sourceRepo": "meshery/meshery", + "reactions": 2, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:49:00.411Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/metal3-io/metal3-io-1176-bug-power-off-nodes-upon-deletion.json b/solutions/cncf-generated/metal3-io/metal3-io-1176-bug-power-off-nodes-upon-deletion.json new file mode 100644 index 00000000..48ecc56a --- /dev/null +++ b/solutions/cncf-generated/metal3-io/metal3-io-1176-bug-power-off-nodes-upon-deletion.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:58.895Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "metal3-io: :bug: Power off nodes upon deletion", + "description": "This is a continuation of #816 which in turn tries to fix #410. 
\n\nCo-authored-by: Sandhya Dasu @sadasu", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/test-centos-integration-main \n/test-ubuntu-integration-main", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "metal3-io", + "incubating", + "app-definition", + "lgtm", + "approved", + "size-l" + ], + "category": "workloads", + "cncfProjects": [ + "metal3-io" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/metal3-io/baremetal-operator/pull/1176", + "sourceRepo": "metal3-io/baremetal-operator", + "reactions": 1, + "comments": 16 + }, + "security": { + "scannedAt": "2026-02-27T17:45:58.895Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/metal3-io/metal3-io-1440-replace-uses-of-kubectl-and-cmctl-binaries-in-e2e.json b/solutions/cncf-generated/metal3-io/metal3-io-1440-replace-uses-of-kubectl-and-cmctl-binaries-in-e2e.json new file mode 100644 index 00000000..0f0b8e32 --- /dev/null +++ b/solutions/cncf-generated/metal3-io/metal3-io-1440-replace-uses-of-kubectl-and-cmctl-binaries-in-e2e.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:01.896Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "metal3-io: 🌱 Replace uses of kubectl and cmctl binaries in e2e", + "description": "**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\nFixes #1373 \n\nI chose different implementations from what was suggested in #1373:\n- ~~Instead of using `krusty` package, I copied [the kustomize build workflow used by capi](https://github.com/Nordix/cluster-api/blob/f44c0ebd94aa7409af0db22bfce0953e2fd702ab/test/framework/clusterctl/repository.go#L226). 
This is only for simplicity, and to be aligned with CAPI. The `krusty` package seemed to be not too complicated, but imo the simplier, the better.~~\nChanged (back) to `krusty` after discussion with @lentzi90.\n- The `cert-manager` workflow from CAPI takes into control many of CAPI internal data structures, therefore is not easy to replicate. For our usecase, I think simply download the cert-manager manifests, apply and ~~wait for all deployments to be ready~~ perform a dry-run of self-signed issuer&certificate creation on cert-manage", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/test-centos-e2e-integration-main\n/test-ubuntu-integration-main", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "metal3-io", + "incubating", + "app-definition", + "lgtm", + "approved", + "size-l" + ], + "category": "workloads", + "cncfProjects": [ + "metal3-io" + ], + "targetResourceKinds": [ + "Deployment" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/metal3-io/baremetal-operator/pull/1440", + "sourceRepo": "metal3-io/baremetal-operator", + "reactions": 0, + "comments": 16 + }, + "security": { + "scannedAt": "2026-02-27T17:46:01.896Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/metal3-io/metal3-io-1881-add-test-for-baremetalhost-controller-updateeventhandler.json b/solutions/cncf-generated/metal3-io/metal3-io-1881-add-test-for-baremetalhost-controller-updateeventhandler.json new file mode 100644 index 00000000..465886ae --- /dev/null +++ b/solutions/cncf-generated/metal3-io/metal3-io-1881-add-test-for-baremetalhost-controller-updateeventhandler.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:59.866Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": 
"metal3-io: 🌱Add test for baremetalhost controller updateEventHandler", + "description": "**What this PR does / why we need it**:\n\n~~When using this method, the `saveHostStatus` will requeue the object and create more reconciles for the object when the only thing that has changed is the lastupdated field in the status. This leads to multiple reconciles because this happens during each run through the reconciler.~~\n\nThis adds a test to the baremetalhost controller to validate that the saveHostStatus will not requeue the object and create more reconciles when lastUpdated is updated.\n\n**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\n~~Fixes https://github.com/metal3-io/baremetal-operator/issues/1253~~", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/ok-to-test", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "metal3-io", + "incubating", + "app-definition", + "size-s", + "lgtm", + "approved", + "ok-to-test" + ], + "category": "workloads", + "cncfProjects": [ + "metal3-io" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/metal3-io/baremetal-operator/pull/1881", + "sourceRepo": "metal3-io/baremetal-operator", + "reactions": 1, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:45:59.866Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/metal3-io/metal3-io-340-rename-ready-state-to-available.json b/solutions/cncf-generated/metal3-io/metal3-io-340-rename-ready-state-to-available.json new file mode 100644 index 00000000..ac4e044d --- /dev/null +++ b/solutions/cncf-generated/metal3-io/metal3-io-340-rename-ready-state-to-available.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": 
"2026-02-27T17:45:54.735Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "metal3-io: Rename Ready state to Available", + "description": "Ready is a status also used by the Node resource, but with a different meaning\nwhich causes unnecessary confusion especially on the UI side where Host and Node\nstatus is combined\n\nFixes #315 \n~~Depends on https://github.com/metal3-io/metal3-dev-env/pull/137~~", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Bare Metal Host 'Ready' provisioning status is changed to 'Available'\n\nRequired by https://github.com/metal3-io/baremetal-operator/pull/340", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "FAIL - centos-0 baremetal host in correct state : provisioned\r\n expected provisioned, got provisioning" + ] + } + }, + "metadata": { + "tags": [ + "metal3-io", + "incubating", + "app-definition", + "lgtm", + "approved", + "size-l", + "ok-to-test" + ], + "category": "workloads", + "cncfProjects": [ + "metal3-io" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/metal3-io/baremetal-operator/pull/340", + "sourceRepo": "metal3-io/baremetal-operator", + "reactions": 2, + "comments": 53 + }, + "security": { + "scannedAt": "2026-02-27T17:45:54.735Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/metal3-io/metal3-io-816-attempt-to-hard-power-off-node-before-it-is-deleted.json b/solutions/cncf-generated/metal3-io/metal3-io-816-attempt-to-hard-power-off-node-before-it-is-deleted.json new file mode 100644 index 00000000..a4fc27e0 --- /dev/null +++ b/solutions/cncf-generated/metal3-io/metal3-io-816-attempt-to-hard-power-off-node-before-it-is-deleted.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": 
"2026-02-27T17:45:57.821Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "metal3-io: Attempt to hard power off node before it is deleted", + "description": "When a BareMetalHost is deleted, power it off before performing\nthe delete operation.\nFixes #410", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "When a BareMetalHost is deleted, power it off before performing\nthe delete operation.\nFixes #410", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "metal3-io", + "incubating", + "app-definition", + "size-l", + "needs-rebase", + "lifecycle-stale" + ], + "category": "workloads", + "cncfProjects": [ + "metal3-io" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/metal3-io/baremetal-operator/pull/816", + "sourceRepo": "metal3-io/baremetal-operator", + "reactions": 1, + "comments": 17 + }, + "security": { + "scannedAt": "2026-02-27T17:45:57.821Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/metal3-io/metal3-io-841-better-handling-of-power-management-errors.json b/solutions/cncf-generated/metal3-io/metal3-io-841-better-handling-of-power-management-errors.json new file mode 100644 index 00000000..2ace0285 --- /dev/null +++ b/solutions/cncf-generated/metal3-io/metal3-io-841-better-handling-of-power-management-errors.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:45:56.326Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "metal3-io: Better handling of power management errors", + "description": "Attempts to fix https://github.com/metal3-io/baremetal-operator/issues/828\n\nThis fix should have the following components based on the above bug:\n1. 
If Ironic accepts the power state change but is unable to carry it out (expressed via LastError from ironic), that should result in PowerOn() and PowerOff() returning an ErrorMessage. \n2. Refactor PowerOff() in such a way that if soft power off fails, it proceeds to hard power off without reporting an error. It is possible that this logic is moved out of here and into Ironic itself.\n3. Add a force flag to the PowerOff() so that it skips soft power off and directly proceeds to hard power off after the previous call results in an error.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "When a BareMetalHost is deleted, power it off before performing\nthe delete operation.\nFixes #410", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "metal3-io", + "incubating", + "app-definition", + "lgtm", + "approved", + "size-l" + ], + "category": "workloads", + "cncfProjects": [ + "metal3-io" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/metal3-io/baremetal-operator/pull/841", + "sourceRepo": "metal3-io/baremetal-operator", + "reactions": 1, + "comments": 30 + }, + "security": { + "scannedAt": "2026-02-27T17:45:56.326Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/metal3-io/metal3-io-903-cache-only-required-secrets.json b/solutions/cncf-generated/metal3-io/metal3-io-903-cache-only-required-secrets.json new file mode 100644 index 00000000..805ca2ac --- /dev/null +++ b/solutions/cncf-generated/metal3-io/metal3-io-903-cache-only-required-secrets.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:00.895Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "metal3-io: Cache only required secrets", + "description": "This PR 
limits the BMO secrets caching just to those ones really required, by filtering secrets watch request through a new label selector.\nThis will fix the problem of the very high memory usage detected when BMO is configured to watch the entire cluster (`WATCH_NAMESPACE=\"\"`), and moreover will keep in cache just the BMH related secrets.\n\nFixes https://github.com/metal3-io/baremetal-operator/issues/904", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/hold requires `controller-runtime 0.9.0`", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "metal3-io", + "incubating", + "app-definition", + "size-xxl", + "lgtm", + "approved" + ], + "category": "workloads", + "cncfProjects": [ + "metal3-io" + ], + "targetResourceKinds": [ + "Secret", + "Namespace" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/metal3-io/baremetal-operator/pull/903", + "sourceRepo": "metal3-io/baremetal-operator", + "reactions": 0, + "comments": 20 + }, + "security": { + "scannedAt": "2026-02-27T17:46:00.895Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/metallb/metallb-1174-controller-custom-annotation-for-pinning-ips-single-and-dual-stack.json b/solutions/cncf-generated/metallb/metallb-1174-controller-custom-annotation-for-pinning-ips-single-and-dual-stack.json new file mode 100644 index 00000000..27017c31 --- /dev/null +++ b/solutions/cncf-generated/metallb/metallb-1174-controller-custom-annotation-for-pinning-ips-single-and-dual-stack.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:17.303Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "metallb: Controller: custom annotation for pinning ips (single and dual stack)", + "description": "This PR aims to fix #1155 
issue by adding new custom annotation field `metallb.universe.tf/loadBalancerIPs `which can be used to request specific IPs (single and dual stack). For now this would coexist with `svc.Spec.LoadBalancerIP `field. when both fieds are specified, then annotation field takes higher precedence than spec field.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Left a couple of comments, I'd also add an e2e test to make sure this works on the happy path.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "metallb", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "metallb" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/metallb/metallb/pull/1174", + "sourceRepo": "metallb/metallb", + "reactions": 7, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:49:17.303Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/metallb/metallb-466-add-dual-stack-support-in-the-controller.json b/solutions/cncf-generated/metallb/metallb-466-add-dual-stack-support-in-the-controller.json new file mode 100644 index 00000000..1ea8d4ec --- /dev/null +++ b/solutions/cncf-generated/metallb/metallb-466-add-dual-stack-support-in-the-controller.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:24.001Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "metallb: Add dual-stack support in the controller", + "description": "Fixes #464 \n\n- [x] Add a `isIPv6` parameter to the pool Allocate functions\n- [x] Check `isIPv6` and allocate from a pool with the correct family\n- [x] Add test for the new function in `allocator_test.go`\n- [x] Add controller tests\n- [x] Investigate statistics and update if 
necessary https://github.com/danderson/metallb/pull/466#issuecomment-533507879", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "The functions can now be tested on a k8s cluster with PR https://github.com/kubernetes/kubernetes/pull/79386 applied.\n\n#### Metallb config;\n```\napiVersion: v1\nkind: ConfigMap\nmetadata:\n namespace: default\n name: config\ndata:\n config: |\n peers:\n address-pools:\n - name: default\n protocol: layer2\n addresses:\n - 10.0.0.0/28\n - 1000::/124\n```\nCIRDs with different families defined for the pool.\n\nServices with different families are created;\n```\napiVersion: v1\nkind: Service\nmetadata:\n name: mconnect-ipv4\nspec:\n ipFamily: IPv4\n selector:\n app: mconnect\n ports:\n - port: 5001\n type: LoadBalancer\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: mconnect-ipv6\nspec:\n ipFamily: IPv6\n selector:\n app: mconnect\n ports:\n - port: 5001\n type: LoadBalancer\n```\n\nThe `controller` will assign loadBalancerIP from the right family;\n```\n# kubectl get svc\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\ncoredns ClusterIP 12.0.0.2 53/UDP,53/TCP 39s\nkubernetes ClusterIP 12.0.0.1 443/TCP 40s\nmconnect-ipv4 LoadBalancer 12.0.98.75 10.0.0.0 5001:30063/TCP 9s\nmconnect-ipv6 LoadBalancer fd00:4000::d742 1000:: 5001:31771/TCP 9s\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "apiVersion: v1\r\nkind: ConfigMap\r\nmetadata:\r\n namespace: default\r\n name: config\r\ndata:\r\n config: |\r\n peers:\r\n address-pools:\r\n - name: default\r\n protocol: layer2\r\n addresses:\r\n - 10.0.0.0/28\r\n - 1000::/124", + "apiVersion: v1\r\nkind: Service\r\nmetadata:\r\n name: mconnect-ipv4\r\nspec:\r\n ipFamily: IPv4\r\n selector:\r\n app: mconnect\r\n ports:\r\n - port: 5001\r\n type: LoadBalancer\r\n---\r\napiVersion: v1\r\nkind: Service\r\nmetadata:\r\n name: mconnect-ipv6\r\nspec:\r\n ipFamily: IPv6\r\n 
selector:\r\n app: mconnect\r\n ports:\r\n - port: 5001\r\n type: LoadBalancer", + "# kubectl get svc\r\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\r\ncoredns ClusterIP 12.0.0.2 53/UDP,53/TCP 39s\r\nkubernetes ClusterIP 12.0.0.1 443/TCP 40s\r\nmconnect-ipv4 LoadBalancer 12.0.98.75 10.0.0.0 5001:30063/TCP 9s\r\nmconnect-ipv6 LoadBalancer fd00:4000::d742 1000:: 5001:31771/TCP 9s" + ] + } + }, + "metadata": { + "tags": [ + "metallb", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "metallb" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/metallb/metallb/pull/466", + "sourceRepo": "metallb/metallb", + "reactions": 2, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:49:24.001Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/metallb/metallb-527-use-hashicorp-memberlist-to-speedup-dead-node-detection.json b/solutions/cncf-generated/metallb/metallb-527-use-hashicorp-memberlist-to-speedup-dead-node-detection.json new file mode 100644 index 00000000..82638505 --- /dev/null +++ b/solutions/cncf-generated/metallb/metallb-527-use-hashicorp-memberlist-to-speedup-dead-node-detection.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:22.937Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "metallb: Use hashicorp/memberlist to speedup dead node detection", + "description": "This is an early POC to fix #298.\nIt needs a lot of cleanup, but without tuning I get failover around 3s \\o/", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Great idea! I really think using Kubernetes to do the failover handling is not a practical approach. Do you have any indication how this will scale with e.g. 
50 or 80 nodes?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "namespace: metallb-system\r\n\r\nresources:\r\n - metallb.yaml\r\n\r\nconfigMapGenerator:\r\n- name: config\r\n files:\r\n - configs/config\r\n\r\nsecretGenerator:\r\n- name: memberlist\r\n files:\r\n - configs/secretkey\r\n\r\ngeneratorOptions:\r\n disableNameSuffixHash: true" + ] + } + }, + "metadata": { + "tags": [ + "metallb", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "metallb" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/metallb/metallb/pull/527", + "sourceRepo": "metallb/metallb", + "reactions": 2, + "comments": 21 + }, + "security": { + "scannedAt": "2026-02-27T17:49:22.937Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/metallb/metallb-562-rfc-fix-conflicting-arp-when-ip-is-shared.json b/solutions/cncf-generated/metallb/metallb-562-rfc-fix-conflicting-arp-when-ip-is-shared.json new file mode 100644 index 00000000..db84311d --- /dev/null +++ b/solutions/cncf-generated/metallb/metallb-562-rfc-fix-conflicting-arp-when-ip-is-shared.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:19.044Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "metallb: [RFC] Fix conflicting ARP when IP is shared", + "description": "This is just a RFC to show the idea. 
Not tested yet.\n\nWhen IP is shared by two services, it may be annonced from different\nnodes causing conflicting arp responses.\n\nThis commit fixes by using service IP in the hash instead of service\nname so that service sharing the same IP will have the same master.\n\nFor traffic cluster services, all nodes should be usable instead of\nthose running pods, so that services sharing IPs have the same set of\nusable nodes.\n\nFixed #558", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Fixes #302.\nAs the issue states, even if service specifies `externalTrafficPolicy: Cluster`, MetalLB will not announce if no endpoint reside in the node with speakers. This can occur if speakers are deployed only on a subset of nodes in the cluster.\n\nThis commit makes use of `activeNodes` as a fallback if no usable nodes are available. If memberlist is not enabled, it will act same as if the fallback is disabled.\nThe idea is basically same as #613, with minor code implementation differences.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "docker.io/kvaps/metallb-controller:a3047c4d\r\ndocker.io/kvaps/metallb-speaker:a3047c4d" + ] + } + }, + "metadata": { + "tags": [ + "metallb", + "sandbox", + "app-definition", + "bug", + "protocol-layer2", + "do-not-merge" + ], + "category": "workloads", + "cncfProjects": [ + "metallb" + ], + "targetResourceKinds": [ + "Pod", + "Service", + "Node" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/metallb/metallb/pull/562", + "sourceRepo": "metallb/metallb", + "reactions": 3, + "comments": 33 + }, + "security": { + "scannedAt": "2026-02-27T17:49:19.044Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/metallb/metallb-658-re-add-helm-chart-under-charts-metallb.json 
b/solutions/cncf-generated/metallb/metallb-658-re-add-helm-chart-under-charts-metallb.json new file mode 100644 index 00000000..a7e2ae92 --- /dev/null +++ b/solutions/cncf-generated/metallb/metallb-658-re-add-helm-chart-under-charts-metallb.json @@ -0,0 +1,53 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:16.240Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "metallb: Re-add helm chart under charts/metallb", + "description": "Create a new helm chart integrating features of the previous chart, as\nwell as stable/metallb and bitnami/metallb.\n\nFeatures:\n* Support new environment variables in speaker\n* Use PodMonitor instead of ServiceMonitor (no need to create Services)\n* Create PrometheusRule to detect stale config and config-not-loaded\n* Configurable PrometheusRule alerts for address pool exhaustion\n* Support MetalLB controller creating memberlist secret\n* Standardize labels using template helper\n* Create config-watcher and pod-lister roles\n* OPA/rego based chart output validation\n* JSON Schema values validation\n\nFixes metallb/metallb#653", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "@gclawes `v0.9.4` is now released :slightly_smiling_face: - FYI.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "set -ex\r\n\r\n # install openssl and kubectl\r\n echo \"http://dl-cdn.alpinelinux.org/alpine/edge/testing\" >> /etc/apk/repositories\r\n apk --no-cache add openssl kubectl\r\n\r\n # generate a random string with no newlines\r\n # store in file secret does not leak to logs\r\n openssl rand -base64 128 > /tmp/secret\r\n\r\n # create secret\r\n kubectl \\\r\n create secret generic metallb-memberlist \\\r\n --from-file=secretkey=/tmp/secret", + "# helm install metallb --namespace metallb-system metallb --version 0.9.4 --set existingConfigMap=config\r\nError: unable 
to build kubernetes objects from release manifest: error validating \"\": error validating data: [ValidationError(Role.rules[0]): unknown field \"resources:\" in io.k8s.api.rbac.v1.PolicyRule, ValidationError(Role.rules[1]): unknown field \"resources:\" in io.k8s.api.rbac.v1.PolicyRule]", + "# helm install metallb --namespace metallb-system bitnami/metallb --version 0.1.28 --set existingConfigMap=config\r\n...\r\n# helm upgrade metallb --namespace metallb-system metallb --version 0.9.4 --set existingConfigMap=config\r\nRelease \"metallb\" has been upgraded. Happy Helming!\r\nNAME: metallb\r\nLAST DEPLOYED: Sat Oct 24 21:11:26 2020\r\nNAMESPACE: metallb-system\r\nSTATUS: deployed\r\nREVISION: 2" + ] + } + }, + "metadata": { + "tags": [ + "metallb", + "sandbox", + "app-definition", + "enhancement" + ], + "category": "workloads", + "cncfProjects": [ + "metallb" + ], + "targetResourceKinds": [ + "Pod", + "Service", + "Secret", + "Role" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/metallb/metallb/pull/658", + "sourceRepo": "metallb/metallb", + "reactions": 22, + "comments": 31 + }, + "security": { + "scannedAt": "2026-02-27T17:49:16.241Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/metallb/metallb-723-go-mod-use-k8s-io-client-go-v0-20-0.json b/solutions/cncf-generated/metallb/metallb-723-go-mod-use-k8s-io-client-go-v0-20-0.json new file mode 100644 index 00000000..2fdaa051 --- /dev/null +++ b/solutions/cncf-generated/metallb/metallb-723-go-mod-use-k8s-io-client-go-v0-20-0.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:27.258Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "metallb: go.mod: use k8s.io/client-go@v0.20.0", + "description": "Use a defined version of k8s.io/client-go.\n\nFixes #722", + "type": "troubleshoot", + "status": "completed", + "resolution": { + 
"summary": "These tests no longer work. They rely on the now archived virtuakube,\nwhich doesn't build against kube 1.19, which ends up blocking fixing\nmetallb for kube 1.19. See #723 for discussion of where it gets in the\nway.\n\nAt this point we should just drop this directory and re-introduce\ne2etest code once someone is able to work on it. We can always pull\nthis back out of git history if it helps.\n\nThe commit contents are the result of roughly:\n\n - git rm -r e2etest\n - go mod tidy\n - git add go.mod go.sum\n\nCloses #192 \nCloses #629\nCloses #639", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "/go/pkg/mod/go.universe.tf/virtuakube@v0.0.0-20190708182722-512c11153571/cluster.go:194:47: not enough arguments in call to c.client.CoreV1().Nodes().List have (\"k8s.io/apimachinery/pkg/apis/meta/v1\".ListOptions)", + "find . -name cluster.go" + ] + } + }, + "metadata": { + "tags": [ + "metallb", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "metallb" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/metallb/metallb/pull/723", + "sourceRepo": "metallb/metallb", + "reactions": 1, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:49:27.258Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/metallb/metallb-833-design-metallb-crd-controller-enhancement.json b/solutions/cncf-generated/metallb/metallb-833-design-metallb-crd-controller-enhancement.json new file mode 100644 index 00000000..d9cda7ee --- /dev/null +++ b/solutions/cncf-generated/metallb/metallb-833-design-metallb-crd-controller-enhancement.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:21.895Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + 
"title": "metallb: design: MetalLB CRD controller enhancement", + "description": "This design document discusses implementing MetalLB Custom Resource definition\"CRD\"\nas a mechanism to configure MetalLB's layer2 and BGP features instead of using ConfigMap.\n\nran mdl against the new md file\n```\nmdl 0001-metallb-crd.md \n```\nSigned-off-by: Mohamed Mahmoud \n\nFixes https://github.com/metallb/metallb/issues/196.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## General\n\nFixes #529.\n\nThis PR adds a new feature to MetalLB called **peer autodiscovery**. This feature allows MetalLB to create BGP peers by discovering their configuration from annotations and labels on `Node` Kubernetes objects. The rationale and use case for this is explained in https://github.com/metallb/metallb/issues/529.\n\nIn addition to the main functionality, this PR adds a **status endpoint** to the speaker. This is an HTTP endpoint which allows querying the internal state of a speaker. The endpoint is available per protocol (BGP/Layer 2) at the following URI: `http://:7473/status/`.\n\nWhile the endpoint is added generically at the \"protocol\" level, this PR implements a handler for BGP only.\n\nDocumentation as well as sample configuration for this feature are included with this PR.\n\nDiscovered peers (or \"node peers\") can coexist with \"regular\" peers - the user is free to use peer autodiscovery alone, in conjunction with static peer configuration or not use autodiscovery at all. The implementation ensures existing functionality isn't broken and filters duplicate peers in case a node peer that's identical to a static peer is discovered.\n\nFinally, the user has full flexibility in determining exactly which BGP parameters should be discovered automatically as well as which annotations/labels to use for determining the values. 
The implementation is generic and doesn't expect a specific annotation/label format so as not to couple MetalLB to a specific i", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "mdl 0001-metallb-crd.md", + "git clone git@github.com:kinvolk/metallb.git\r\ncd metallb\r\ngit checkout johananl/peer-autodiscovery\r\n\r\n# Run the unit tests\r\ninv test\r\n\r\n# Deploy MetalLB to a local cluster\r\ninv dev-env -p bgp", + "kubectl -n metallb-system get pods" + ] + } + }, + "metadata": { + "tags": [ + "metallb", + "sandbox", + "app-definition", + "design-proposal" + ], + "category": "workloads", + "cncfProjects": [ + "metallb" + ], + "targetResourceKinds": [ + "Configmap" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/metallb/metallb/pull/833", + "sourceRepo": "metallb/metallb", + "reactions": 3, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:49:21.895Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/metallb/metallb-837-support-endpoint-slices.json b/solutions/cncf-generated/metallb/metallb-837-support-endpoint-slices.json new file mode 100644 index 00000000..a50c8bf4 --- /dev/null +++ b/solutions/cncf-generated/metallb/metallb-837-support-endpoint-slices.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:20.127Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "metallb: Support endpoint slices", + "description": "Endpoint slices are a more scalable way to distribute service endpoints.\n\nHere we check if they are enabled on the cluster, and we set up the\nlisteners and the logic to use them, otherwise we fallback to use\nendpoints.\n\nFixes #811", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "> Thanks for the PR! How did you test this? 
Did you try it in the metallb kind-based dev env? I see the unit test cases you added, so thank you for that!\n\nThis is one of the things I wanted feedback on. I deployed a kind + ovnk cluster (where I knew ep slices were enabled), deployed there and checked that the events were propagated up to the listeners (with extra logs, now removed).\nOnce there, the other segment of the path is covered by the unit tests. Not sure if there's a better way to do that.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "metallb", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "metallb" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/metallb/metallb/pull/837", + "sourceRepo": "metallb/metallb", + "reactions": 3, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:49:20.127Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/metallb/metallb-895-implement-leveled-logging.json b/solutions/cncf-generated/metallb/metallb-895-implement-leveled-logging.json new file mode 100644 index 00000000..d97e582a --- /dev/null +++ b/solutions/cncf-generated/metallb/metallb-895-implement-leveled-logging.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:25.620Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "metallb: Implement leveled logging", + "description": "Signed-off-by: Utku Ozdemir \n\nThis PR implements leveled logging. I set the levels as I've seen fit by looking at their messages - any feedbacks are appreciated.\nFixes #254 \nFixes #655", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## General\n\nThis was a fix for #589. 
Now it's just the end of the SpeakerList refactor.\n\nAt the beginning, this PR was just a full rework of memberlist and bits of layer2. A part of it was merged along the way and it's still big in part because it undoes https://github.com/metallb/metallb/pull/662/commits/cf494a91b267626753530c47ae482250ae1886eb.\n\nIt does multiple cleanups / fixes:\n\n1) Stop using Memberlist.Members() as it's racy.\n2) Remove the direct dependency between SpeakerList and k8s.Client.\n3) Stop polling the API and use watches.\n4) Only consider ready speakers when memberlist is disabled.\n\nTo achieve no. 3, there is one user-facing change: We introduce a headless speaker service.\n\n## TODO\n\n- [ ] Tests", + "steps": [ + "Stop using Memberlist.Members() as it's racy.", + "Remove the direct dependency between SpeakerList and k8s.Client.", + "Stop polling the API and use watches.", + "Only consider ready speakers when memberlist is disabled." + ], + "codeSnippets": [ + "$ inv dev-env -p layer2\r\nkind version\r\nkind v0.10.0 go1.15.7 linux/amd64\r\ngo build -v -o build/amd64/controller/controller -ldflags '-X go.universe.tf/metallb/internal/version.gitCommit=25e1592a -X go.universe.tf/metallb/internal/version.gitBranch=pr-895' go.universe.tf/metallb/controller\r\ngo.universe.tf/metallb/internal/logging\r\n# go.universe.tf/metallb/internal/logging\r\ninternal/logging/logging.go:56:14: level.NewFilter undefined (type string has no field or method NewFilter)\r\ninternal/logging/logging.go:147:15: level.AllowAll undefined (type string has no field or method AllowAll)\r\ninternal/logging/logging.go:149:15: level.AllowDebug undefined (type string has no field or method AllowDebug)\r\ninternal/logging/logging.go:151:15: level.AllowInfo undefined (type string has no field or method AllowInfo)\r\ninternal/logging/logging.go:153:15: level.AllowWarn undefined (type string has no field or method AllowWarn)\r\ninternal/logging/logging.go:155:15: level.AllowError undefined (type string has no 
field or method AllowError)\r\ninternal/logging/logging.go:157:15: level.AllowNone undefined (type string has no field or method AllowNone)", + "$ go version\r\ngo version go1.16.4 linux/amd64" + ] + } + }, + "metadata": { + "tags": [ + "metallb", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "metallb" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/metallb/metallb/pull/895", + "sourceRepo": "metallb/metallb", + "reactions": 2, + "comments": 26 + }, + "security": { + "scannedAt": "2026-02-27T17:49:25.620Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/nats/nats-5057-added-opentelemetry-support.json b/solutions/cncf-generated/nats/nats-5057-added-opentelemetry-support.json new file mode 100644 index 00000000..8feea522 --- /dev/null +++ b/solutions/cncf-generated/nats/nats-5057-added-opentelemetry-support.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:04.034Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "nats: Added OpenTelemetry support", + "description": "If the `Nats-Trace-Dest` header is not present, but `Traceparent` is and its last token is `01`, then message tracing is triggered. This also requires that the account be defined with a `trace_dest` subject so that traces can be sent there.\nNote that `Nats-Trace-Only` is not applicable for `Traceparent`.\n\nAddition to PR #5014\nResolves #5052\n\nSigned-off-by: Ivan Kozlovic ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@derekcollison Updated the PR based on your comments. 
But also would want @ripienaar to have a look at it tomorrow to see if that matches its expectations for otel support.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "acc := NewAccount(\"MyAccount\")\r\nacc.TraceDest = \"my.trace.dest\"\r\nopts.Accounts = []*Account{acc}\r\n...\r\ns, err := NewServer(opts)\r\n..." + ] + } + }, + "metadata": { + "tags": [ + "nats", + "incubating", + "networking" + ], + "category": "networking", + "cncfProjects": [ + "nats" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/nats-io/nats-server/pull/5057", + "sourceRepo": "nats-io/nats-server", + "reactions": 2, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:46:04.034Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/notary-project/notary-project-916-feat-upgrade-to-oci-1-1.json b/solutions/cncf-generated/notary-project/notary-project-916-feat-upgrade-to-oci-1-1.json new file mode 100644 index 00000000..033c927b --- /dev/null +++ b/solutions/cncf-generated/notary-project/notary-project-916-feat-upgrade-to-oci-1-1.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:06.210Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "notary-project: feat: upgrade to OCI 1.1", + "description": "This PR upgrades Notation to OCI 1.1.\n\nMajor changes **[UPDATE as of 4/2/2024 after community meeting]**:\n1. New flag `--force-referrers-tag` is introduced. And it is only applied to the `Sign` command. It's default to `true`, and it's NOT an `experimental` flag. The original `experimental` flag `--allow-referrers-api` will be hidden, i.e., description and example will be hidden from help page. 
It is kept only for backwards compatibility purpose, and a warning will be printed out when user sets it.\n2. Sign: \n ```\n # Default behavior: Use the referrers tag schema for backwards compatibility.\n notation sign ...\n notation sign --force-referrers-tag ...\n \n # With `--force-referrers-tag=false`: Check the Referrers API first, if not supported, automatically fallback to the referrers tag schema.\n notation sign --force-referrers-tag=false ...\n ```\n3. Verify/List/Inspect: They will always use the Referrers API first, if not supported, automatically fallback to the", + "type": "upgrade", + "status": "completed", + "resolution": { + "summary": "I am kind of @shizhMSFT 's proposal, i.e., \n\n- `--force-referrers-tag` and its default value is true: \nFrom end users' point of view, they care about how signature can be stored in the registry. Apparently, the signature will be pushed and stored as a \"sha256-xxxxx\" tag in the registry with the default `--force-referrers-tag=true`. So `--force-referrers-tag` seems much more straightforward for end users to understand how the signature is stored in the registry. \n- Keep `--allow-referrers-api` as an alias: \nFor some popular registries that already supported OCI v1.1 and integrated Notation, such as [Harbor](https://goharbor.io/docs/2.9.0/working-with-projects/working-with-images/sign-images/#use-notationexperimental-to-sign-artifacts-with-distribution-spec-v11-mode) and Zot, `--allow-referrers-api` has been referenced in their documentation. Keep the original flag name will be friendly and non-breaking for those.\n\nFor @sudo-bmitch and @priteshbandi 's proposals above, `--force-1-1-compatibility` and `force-oci-1-1` sound a bit vague and technical because most of end users are not aware of the OCI spec and different versions. 
In general, users care about which signature format they can use to store the signature in registry.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "# Default behavior: Use the referrers tag schema for backwards compatibility.\r\n notation sign ...\r\n notation sign --force-referrers-tag ...\r\n \r\n # With `--force-referrers-tag=false`: Check the Referrers API first, if not supported, automatically fallback to the referrers tag schema.\r\n notation sign --force-referrers-tag=false ...", + "

\n\n[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/notaryproject/notation/pull/916?dropdown=coverage&src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=notaryproject). \n:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=notaryproject).\n\nThis may be too late to change, but if not, I'd suggest renaming the flag to something more clear to end users:", + "Default the value to true for now, and on a 2.x release, change the default to false.\r\n\r\nFrom an outside view, `--allow-referrers-api` is confusing since the signing operation isn't calling the API and the client has no say in whether the registry will include the content in the referrers response, that's automatic based on the `subject` field.\r\n\r\nEdit: note this is a focus on the notation version, rather than the OCI version, since users would more likely know the version of their notation clients while they don't necessarily know the version of their registry.\n> This may be too late to change, but if not, I'd suggest renaming the flag to something more clear to end users:\r\n> \r\n>" + ] + } + }, + "metadata": { + "tags": [ + "notary-project", + "incubating", + "security" + ], + "category": "security", + "cncfProjects": [ + "notary-project" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/notaryproject/notation/pull/916", + "sourceRepo": "notaryproject/notation", + "reactions": 2, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:46:06.210Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-1204-improved-audience-handling-to-support-client-credentials-acces.json 
b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-1204-improved-audience-handling-to-support-client-credentials-acces.json new file mode 100644 index 00000000..03a79835 --- /dev/null +++ b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-1204-improved-audience-handling-to-support-client-credentials-acces.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:48.354Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "oauth2-proxy: improved audience handling to support client credentials access tokens without aud claims", + "description": "This PR adds extra options to optionally skip `aud` verification, if `aud` is missing in the bearer token and the possibility to define an alternative claim to verify against the client id:\n- `skip-aud-check-when-missing` (bool)\n- `audience-verification-claim` (string)\n- `extra-audiences-for-verification` ([]string)\n\n### skip-aud-check-when-missing\nIf enabled, audience is only verified if the `aud` claim exists in the token. If not, there is no verification of audience.\n\nThis will internally set `SkipClientIDCheck` in the underlying go-oidc library to true.\nhttps://github.com/AOEpeople/oauth2-proxy/blob/69dfbce99a158556010da4d60c1d5864abd86e3a/pkg/validation/options.go#L123\n\nIf set, the verification will happen within oauth2-proxy to have more control over the details of token verification:\nhttps://github.com/AOEpeople/oauth2-proxy/blob/69dfbce99a158556010da4d60c1d5864abd86e3a/providers/provider_data.go#L175\n\n### audience-verification-claim\nCan be specified along with `skip-aud-check", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@kschu91 @czenker Are you using both client credentials and user ID Tokens? 
Seems we need to do some work on this PR to allow them both to co-exist", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "oauth2-proxy", + "sandbox", + "app-definition", + "enhancement" + ], + "category": "workloads", + "cncfProjects": [ + "oauth2-proxy" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/oauth2-proxy/oauth2-proxy/pull/1204", + "sourceRepo": "oauth2-proxy/oauth2-proxy", + "reactions": 4, + "comments": 23 + }, + "security": { + "scannedAt": "2026-02-27T17:49:48.354Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-15-whitelist-domains.json b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-15-whitelist-domains.json new file mode 100644 index 00000000..7396efec --- /dev/null +++ b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-15-whitelist-domains.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:39.949Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "oauth2-proxy: Whitelist domains", + "description": "Adds a `whitelist-domain` flag that can be used to whitelist a set of domains for the redirect parameter in the authentication request", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "I wrote this on Friday to fix #399 \n\nTidied it up this morning and went to create PR noticing #461 had been created.\n\nI think this better fixes #399 as it looks for domain suffixes rather than explicit domains which I think was the point of the discussion.\n\nTake from this what you will", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "oauth2-proxy", + "sandbox", + "app-definition", 
+ "enhancement" + ], + "category": "workloads", + "cncfProjects": [ + "oauth2-proxy" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/oauth2-proxy/oauth2-proxy/pull/15", + "sourceRepo": "oauth2-proxy/oauth2-proxy", + "reactions": 9, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:49:39.949Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-150-fix-redirects-on-sign-in-when-skip-provider-button-is-set.json b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-150-fix-redirects-on-sign-in-when-skip-provider-button-is-set.json new file mode 100644 index 00000000..404bc5ec --- /dev/null +++ b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-150-fix-redirects-on-sign-in-when-skip-provider-button-is-set.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:41.505Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "oauth2-proxy: Fix redirects on /sign_in when -skip-provider-button is set", + "description": "Push the logic down a level: now the dispatch happens in SignInPage, where the correct redirect URI is known.\n\nFixes #30, hopefully\n\n## How Has This Been Tested?\n\nManually tested /oauth2/sign_in and /oauth2/start endpoints with and without -skip-provider-button, with `X-Auth-Request-Redirect` header set for /oauth2/sign_in requests, and the `rd=` URL parameter set for /oauth2/start requests.\n\n## Checklist:\n\n- [ ] My change requires a change to the documentation or CHANGELOG.\n- [ ] I have updated the documentation/CHANGELOG accordingly.\n- [x] I have created a feature (non-master) branch for my PR.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "accept rd param and X-Auth-Request-Redirect header from\nboth /sign_in and /start handlers\n\navoid accidental redirect to 
either /sign_in or /start handlers\n\ninspired by https://github.com/pusher/oauth2_proxy/pull/150\n\n------\n\nUpdated in February 2020 with the related:\n\ncheck for /\\ redirects\n\ntricky open-redirect vulnerability, see\nGHSA-qqxw-m5fj-f7gv", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "oauth2-proxy", + "sandbox", + "app-definition", + "stale" + ], + "category": "workloads", + "cncfProjects": [ + "oauth2-proxy" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/oauth2-proxy/oauth2-proxy/pull/150", + "sourceRepo": "oauth2-proxy/oauth2-proxy", + "reactions": 8, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:49:41.505Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-1500-implementation-of-multiple-providers.json b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-1500-implementation-of-multiple-providers.json new file mode 100644 index 00000000..36337245 --- /dev/null +++ b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-1500-implementation-of-multiple-providers.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:36.626Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "oauth2-proxy: Implementation of Multiple Providers", + "description": "* Added BUILDPLATFORM arg to dockerfile in order to pass arg from docker-compose build commands\n* Updated make file to support a multiple provider setup\n* Updated docker compose to support build + up\n* updated alpha configs to actually work\n* Updated sign_in template to support multiple providers\n* Updated validation package to loop over the provider array and validate all providers\n* Added Provider ID to Providerdata struct\n* Added Provider ID to the 
state so callback knows which provider to use\n* Added dynamic oauth2/{id}/start routes for each provider ( /oauth2/start uses [0] provider still )\n* Converted providers.Provider interface to a slice in oauth struct.\n* Updated unit tests to support provider struct.\n* Return 404/400 if a /oauth2/{id}/start request does not match a providerid\n* Added validation to check for a provider ID and providerName in each provider - will shutdown if both do not exist.\n* Added Cookie refresh logic to select the correct provider\n* Update documentation f", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## Description, Motivation and Context\n\nThis is a possible implementation of the facility discussed in #1314 to make it possible to pass through query parameters from `/oauth2/start` to the Identity Provider login URL in a secure and controlled way. Motivating use cases include\n\n - wanting most requests to use `prompt=login` by default, but with the possibility to override this to `prompt=select_account` or `prompt=consent` in certain specific cases\n - passing a `login_hint` when you already know the user's email address (#1369)\n - passing additional non-standard parameters required by certain providers, e.g. `organization` when using auth0 (https://github.com/oauth2-proxy/oauth2-proxy/issues/1314#issuecomment-950273012)\n\nI've introduced a generic mechanism whereby an oauth2-proxy user can configure `loginURLParameters` in the \"alpha\" YAML config to define the parameters that may be passed through from `/oauth2/start` to the IdP. 
This is a map from the query parameter name to a set of configuration options:\n\n```yaml\nloginURLParameters:\n prompt:\n default: [\"login\"]\n allowed: [\"select_account\", \"consent\"]\n login_hint:\n allowAny: true\n organization:\n default: [\"org1\"]\n```\n\nThe interaction between these options is not entirely intuitive, but the logic is intended to work as follows:\n\n- only URL parameters named under `loginURLParameters` are candidates to be passed through from `/oauth2/start?...` to the IdP, any other parameters on the `/oauth2/start` URL will b", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "I still have the following point to look at - but will open another PR for this point.\r\n\r\n* Currently combining all certificates for providers if offered - I think each provider needs its own http client so we have flexibility to handle combinations of providers with different settings (i.e. 1 provider has custom cert but another does not) - this will be a big task so i will probably create another PR\r\n\r\n\r\n## Description\r\n\r\n* Added BUILDPLATFORM arg to dockerfile in order to pass arg from docker-compose build commands\r\n* Updated make file to support a multiple provider setup\r\n* Updated docker compose to support build + up\r\n* updated alpha configs to actually work\r\n* Updated sign_in template to support multiple providers\r\n* Updated validation package to loop over the provider array and validate all providers\r\n* Added Provider ID to Providerdata struct\r\n* Added Provider ID to the state so callback knows which provider to use\r\n* Added dynamic oauth2/{id}/start routes for each provider ( /oauth2/start uses [0] provider still )\r\n* Converted providers.Provider interface to a slice in oauth struct.\r\n* Updated unit tests to support provider struct.\r\n* Return 404/400 if a /oauth2/{id}/start request does not match a providerid\r\n* Added validation to 
check for a provider ID and providerName in each provider - will shutdown if both do not exist.\r\n* Added Cookie refresh logic to select the correct provider\r\n* Update documentation for multiple provider config\r\n\r\n## Motivation and Context\r\n\r\n Important for environments with multiple companies etc\r\n https://github.com/oauth2-proxy/oauth2-proxy/issues/926 Closes #926\r\n\r\n## How Has This Been Tested?\r\n\r\n\r\ntested locally with Keycloak and Dex as multiple providers\r\n\r\n\r\n* Tested locally via docker compose using keycloak and dex as providers\r\n* Still must check new session setup with redis.\r\n* Will test this branch version on our kubernetes feature branch sandboxes over the next month - first with 1 provider to ensure no functionality is lost, then with mutiple afterwards\r\n* Tests still need to be updated \r\n\r\nWill test with Azure provider in our work environment.\r\n\r\n\r\n\r\nCurrently it should not affect anything else other than breaking the sign-in-message override, as it can not support multiple providers.\r\n\r\n## Checklist:\r\n\r\n\r\n\r\n\r\n- [x] My change requires a change to the documentation or CHANGELOG.\r\n- [x] I have updated the documentation/CHANGELOG accordingly.\r\n- [x] I have created a feature (non-master) branch for my PR.\r\n\n\r\n\r\n## Description, Motivation and Context\r\n\r\nThis is a possible implementation of the facility discussed in #1314 to make it possible to pass through query parameters from `/oauth2/start` to the Identity Provider login URL in a secure and controlled way. Motivating use cases include\r\n\r\n - wanting most requests to use `prompt=login` by default, but with the possibility to override this to `prompt=select_account` or `prompt=consent` in certain specific cases\r\n - passing a `login_hint` when you already know the user's email address (#1369)\r\n - passing additional non-standard parameters required by certain providers, e.g. 
`organization` when using auth0 (https://github.com/oauth2-proxy/oauth2-proxy/issues/1314#issuecomment-950273012)\r\n\r\nI've introduced a generic mechanism whereby an oauth2-proxy user can configure `loginURLParameters` in the \"alpha\" YAML config to define the parameters that may be passed through from `/oauth2/start` to the IdP. This is a map from the query parameter name to a set of configuration options:" + ] + } + }, + "metadata": { + "tags": [ + "oauth2-proxy", + "sandbox", + "app-definition", + "enhancement" + ], + "category": "workloads", + "cncfProjects": [ + "oauth2-proxy" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/oauth2-proxy/oauth2-proxy/pull/1500", + "sourceRepo": "oauth2-proxy/oauth2-proxy", + "reactions": 11, + "comments": 4 + }, + "security": { + "scannedAt": "2026-02-27T17:49:36.626Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-1541-pkce-support.json b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-1541-pkce-support.json new file mode 100644 index 00000000..ec0df5bc --- /dev/null +++ b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-1541-pkce-support.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:49.497Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "oauth2-proxy: PKCE Support", + "description": "Adds Code Challenge PKCE support (RFC-7636) and partial\nAuthorization Server Metadata (RFC-8414) for detecting PKCE support.\n\n- Introduces new option `--force-code-challenge-method` to force a\nspecific code challenge method (either `S256` or `plain`) for instances\nwhen the server has not implemented RFC-8414 in order to detect\nPKCE support on the discovery document.\n- In all other cases, if the PKCE support can be determined during discovery\nthen the 
`code_challenge_methods_supported` is used and S256 is always\npreferred.\n- The force command line argument is helpful with some providers like Azure\nwho supports PKCE but does not list it in their discovery document yet.\n- Initial thought was given to just always attempt PKCE since according to spec\nadditional URL parameters should be dropped by servers which implemented\nOAuth 2, however other projects found cases in the wild where this causes 500\nerrors by buggy implementations.\nSee: https://github.com/spring-projects/spring-security/pull", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Code climate error is unrelated to the changes in this PR.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "[2022/02/17 20:39:10] [oauthproxy.go:777] Error redeeming code during OAuth2 callback: could not get claim \"groups\": failed to fetch claims from profile URL: error making request to profile URL: error performing request: Get \"\": unsupported protocol scheme \"\"", + "[2022/02/20 20:59:00] [providers.go:145] Warning: Your provider supports PKCE methods [\"S256\" \"plain\"], but you have not enabled one with --code-challenge-method", + "[2023/04/15 12:14:21] [oauthproxy.go:823] Error redeeming code during OAuth2 callback: unexpected status \"401\": {\"error\":\"invalid_client\",\"error_description\":\"AADSTS700025: Client is public so neither 'client_assertion' nor 'client_secret' should be presented.\\r\\nTrace ID: 6a6f6b1f-f632-4427-b9e1-ae4f97763e00\\r\\nCorrelation ID: 8dddb863-bae6-45b4-84ee-ce59bc0c547c\\r\\nTimestamp: 2023-04-15 12:14:21Z\",\"error_codes\":[700025],\"timestamp\":\"2023-04-15 12:14:21Z\",\"trace_id\":\"6a6f6b1f-f632-4427-b9e1-ae4f97763e00\",\"correlation_id\":\"8dddb863-bae6-45b4-84ee-ce59bc0c547c\"}" + ] + } + }, + "metadata": { + "tags": [ + "oauth2-proxy", + "sandbox", + "app-definition" + ], + "category": "workloads", + 
"cncfProjects": [ + "oauth2-proxy" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/oauth2-proxy/oauth2-proxy/pull/1541", + "sourceRepo": "oauth2-proxy/oauth2-proxy", + "reactions": 4, + "comments": 29 + }, + "security": { + "scannedAt": "2026-02-27T17:49:49.497Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-1574-add-azure-groups-support-and-azure-oauth-v2-0.json b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-1574-add-azure-groups-support-and-azure-oauth-v2-0.json new file mode 100644 index 00000000..4e8f057f --- /dev/null +++ b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-1574-add-azure-groups-support-and-azure-oauth-v2-0.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:35.025Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "oauth2-proxy: Add Azure groups support and Azure OAuth v2.0", + "description": "This PR adds support for A", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "> If a user is member of more groups than the overage limit (200 for JWT tokens), then Azure AD does not emit the groups claim in the token. Instead, it includes an overage claim in the token that indicates to the application to query the Microsoft Graph API to retrieve the user's group membership. See https://docs.microsoft.com/en-us/azure/active-directory/develop/id-tokens#groups-overage-claim for more details\n\nYikes! If a user is a member of more that 200 groups, do we really want to be storing that much data in our session anyway? 
Seems like we will have incredible session bloat", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Error redeeming code during OAuth2 callback: unable to get claims from token: could not get claim \"email\": failed to fetch claims from profile URL: error making request to profile URL: unexpected status \"400\": {\"error\":{\"code\":\"UnknownError\",\"message\":\"Token must contain sub claim.\"", + "> Error redeeming code during OAuth2 callback: unable to get claims from token: could not get claim \"email\": failed to fetch claims from profile URL: error making request to profile URL: unexpected status \"400\": {\"error\":{\"code\":\"UnknownError\",\"message\":\"Token must contain sub claim.\"\r\n>", + "result, err = app.AcquireTokenByCredential(ctx, []string{\"openid\", \"email\", \"profile\", azureGraphURL.String()})" + ] + } + }, + "metadata": { + "tags": [ + "oauth2-proxy", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "oauth2-proxy" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/oauth2-proxy/oauth2-proxy/pull/1574", + "sourceRepo": "oauth2-proxy/oauth2-proxy", + "reactions": 31, + "comments": 39 + }, + "security": { + "scannedAt": "2026-02-27T17:49:35.025Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-1866-add-support-for-unix-socket-as-upstream.json b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-1866-add-support-for-unix-socket-as-upstream.json new file mode 100644 index 00000000..3e4b6b20 --- /dev/null +++ b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-1866-add-support-for-unix-socket-as-upstream.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:52.802Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": 
"auto-generated", + "mission": { + "title": "oauth2-proxy: Add support for unix socket as upstream", + "description": "Add support for unix socket at upstream\nMight require some rework.\n\nShould be able to close #1865", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Add support for systemd.socket\n \n When using sockets to pass data between e.g. nginx and oauth2-proxy it's\n simpler to use sockets. Systemd can even facilitate this and pass the\n actual socket directly.\n \n This also means that only the socket runs with the same group as nginx\n while the service runs with DynamicUser.\n \n nginx\n ```\n server {\n location /oauth2/ {\n proxy_pass http://unix:/run/oauth2-proxy/oauth2.sock;\n }\n ```\n \n oauth2-proxy.socket\n ```\n [Socket]\n ListenStream=%t/oauth2.sock\n SocketGroup=www-data\n SocketMode=0660\n ```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "server {\r\n location /oauth2/ {\r\n proxy_pass http://unix:/run/oauth2-proxy/oauth2.sock;\r\n }", + "[Socket]\r\n ListenStream=%t/oauth2.sock\r\n SocketGroup=www-data\r\n SocketMode=0660" + ] + } + }, + "metadata": { + "tags": [ + "oauth2-proxy", + "sandbox", + "app-definition", + "docs", + "lgtm", + "go", + "changelog", + "tests" + ], + "category": "workloads", + "cncfProjects": [ + "oauth2-proxy" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/oauth2-proxy/oauth2-proxy/pull/1866", + "sourceRepo": "oauth2-proxy/oauth2-proxy", + "reactions": 3, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:49:52.802Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-1933-validate-a-session-using-the-access-token-in-the-oidc-provider.json 
b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-1933-validate-a-session-using-the-access-token-in-the-oidc-provider.json new file mode 100644 index 00000000..2c5ff287 --- /dev/null +++ b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-1933-validate-a-session-using-the-access-token-in-the-oidc-provider.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:44.265Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "oauth2-proxy: Validate a session using the access token in the OIDC provider", + "description": "Another solution would be to validate the ID token only during the initial session creation and skip the validation after the refresh. This seems to be the case with versions < v7.2.0.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Check IDToken expiration time, to see if token must be refreshed. Do not rely only in session age.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "2022-06-27 09:00:37.070 CEST\r\nxxx - 7ff73c70-1a39-40fb-afe4-1164b7a0b700 - xxx@xxx.com [2022/06/27 07:00:37] [AuthSuccess] Authenticated via OAuth2: Session{email:xxx@xxx.com user: PreferredUsername: token:true id_token:true created:2022-06-27 07:00:37.069415957 +0000 UTC m=+325920.006154842 expires:2022-06-27 08:00:36.026393118 +0000 UTC m=+329518.963131992 refresh_token:true}\r\n2022-06-27 10:00:40.317 CEST\r\n[2022/06/27 08:00:40] [stored_session.go:189] Refreshing session - User: ; SessionAge: 1h0m2.930584043s\r\n2022-06-27 10:00:40.471 CEST\r\n[2022/06/27 08:00:40] [oidc.go:87] id_token verification failed: failed to verify token: oidc: token is expired (Token Expiry: 2022-06-27 08:00:36 +0000 UTC)" + ] + } + }, + "metadata": { + "tags": [ + "oauth2-proxy", + "sandbox", + "app-definition", + "enhancement", + "docs", + "high-priority", + "needs-tests", + "go", + 
"provider" + ], + "category": "workloads", + "cncfProjects": [ + "oauth2-proxy" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/oauth2-proxy/oauth2-proxy/pull/1933", + "sourceRepo": "oauth2-proxy/oauth2-proxy", + "reactions": 7, + "comments": 30 + }, + "security": { + "scannedAt": "2026-02-27T17:49:44.265Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-2070-add-azure-workload-identity-support-read-groups-from-claim.json b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-2070-add-azure-workload-identity-support-read-groups-from-claim.json new file mode 100644 index 00000000..8c1496cb --- /dev/null +++ b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-2070-add-azure-workload-identity-support-read-groups-from-claim.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:38.299Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "oauth2-proxy: Add Azure Workload Identity support & read groups from claim", + "description": "This pull request enables to use oauth2-proxy with federated credentials offered by A", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "This pull request enables Workload Identity for A", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "- --http-address=0.0.0.0:4180\r\n - --metrics-address=0.0.0.0:44180\r\n - --provider=azure\r\n - --oidc-issuer-url=https://login.microsoftonline.com/{tenant-id}/v2.0 # tested with V1 as well\r\n - --email-domain=\"*\"\r\n - --cookie-domain=\".domain.com\"\r\n - --whitelist-domain=\"*\"\r\n - --footer=\"Some msg\"\r\n - --skip-jwt-bearer-tokens=true\r\n - --set-xauthrequest=true\r\n - --silence-ping-logging=true\r\n - --reverse-proxy=true\r\n - 
--oidc-email-claim=email\r\n - --insecure-oidc-allow-unverified-email=true\r\n - --skip-provider-button=true\r\n - --upstream=static://202\r\n - --set-authorization-header=false\r\n - --client-id={client-id}\r\n - --cookie-secret={cookie-secret}\r\n - --azure-federated-token-auth-enabled=true # tested with client secrets as well", + "resource \"azuread_application\" \"auth\" {\r\n display_name = \"oauth2-proxy\"\r\n\r\n web {\r\n redirect_uris = [\r\n \"https://myapp.domain.com/oauth2/callback\",\r\n ]\r\n }\r\n\r\n required_resource_access {\r\n resource_app_id = \"00000003-0000-0000-c000-000000000000\" # Microsoft Graph\r\n resource_access {\r\n id = \"e1fe6dd8-ba31-4d61-89e7-88639da4683d\" # User.Read\r\n type = \"Scope\"\r\n }\r\n }\r\n}\r\nresource \"azuread_application_federated_identity_credential\" \"oauth2-proxy\" {\r\n application_id = azuread_application.auth.id\r\n display_name = \"oauth2-proxy\"\r\n description = \"oauth2-proxy\"\r\n audiences = [\"api://AzureADTokenExchange\"]\r\n issuer = \"https://mycluster.oidc-endpoint.com\"\r\n subject = \"system:serviceaccount:oauth2-proxy:oauth2-proxy\"\r\n}", + "serviceAccount:\r\n annotations:\r\n azure.workload.identity/client-id: 5b2583ce-9789-4fd2-970c-2cf711b648ed\r\n\r\npodLabels:\r\n azure.workload.identity/use: \"true\"\r\n\r\nextraArgs:\r\n - --provider=azure\r\n - --client-id=\r\n - --azure-federated-token-auth-enabled\r\n - --oidc-issuer-url=\r\n - --upstream=static://202\r\n - --reverse-proxy=true" + ] + } + }, + "metadata": { + "tags": [ + "oauth2-proxy", + "sandbox", + "app-definition", + "enhancement" + ], + "category": "workloads", + "cncfProjects": [ + "oauth2-proxy" + ], + "targetResourceKinds": [ + "Pod", + "Secret" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/oauth2-proxy/oauth2-proxy/pull/2070", + "sourceRepo": "oauth2-proxy/oauth2-proxy", + "reactions": 11, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:49:38.299Z", + "scannerVersion": 
"cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-3014-feat-ability-to-parse-jwt-encoded-profile-claims.json b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-3014-feat-ability-to-parse-jwt-encoded-profile-claims.json new file mode 100644 index 00000000..5fe9b5e9 --- /dev/null +++ b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-3014-feat-ability-to-parse-jwt-encoded-profile-claims.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:33.689Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "oauth2-proxy: feat: ability to parse JWT encoded profile claims", + "description": "Ability to parse claims from a JWT encoded payload, fixes #2906.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Created by `brew bump`\n\n---\n\nCreated with `brew bump-formula-pr`.
\n release notes\n
## Release Highlights\n- 📨 OAuth 2.0 Multiple Response Type Encoding\n- 📦️ Support for JWT encoded profile claims\n- 🔵 Golang version upgrade to v1.23.8\n- 🕵️‍♀️ Vulnerabilities have been addressed\n  - [CVE-2025-22871](https://github.com/advisories/GHSA-g9pc-8g42-g6vq)\n- 🐛 Squashed some bugs\n  \n## Important Notes\n\n## Breaking Changes\n\n## Changes since v7.8.2\n\n- [#3031](https://github.com/oauth2-proxy/oauth2-proxy/pull/3031) Fixes Refresh Token bug with Entra ID and Workload Identity (#3027)[https://github.com/oauth2-proxy/oauth2-proxy/issues/3028] by using client assertion when redeeming the token (@richard87)\n- [#3001](https://github.com/oauth2-proxy/oauth2-proxy/pull/3001) Allow to set non-default authorization request response mode (@stieler-it)\n- [#3041](https://github.com/oauth2-proxy/oauth2-proxy/pull/3041) chore(deps): upgrade to latest golang v1.23.x release (@TheImplementer)\n- [#1916](https://github.com/oauth2-proxy/oauth2-proxy/pull/1916) fix: role extraction from access token in keycloak oidc (@Elektordi / @tuunit)\n- [#3014](https://github.com/oauth2-proxy/oauth2-proxy/pull/3014) feat: ability to parse JWT encoded profile claims (@ikarius)
\n
\n
", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "oauth2-proxy", + "sandbox", + "app-definition", + "docs", + "go", + "tests" + ], + "category": "workloads", + "cncfProjects": [ + "oauth2-proxy" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/oauth2-proxy/oauth2-proxy/pull/3014", + "sourceRepo": "oauth2-proxy/oauth2-proxy", + "reactions": 47, + "comments": 3 + }, + "security": { + "scannedAt": "2026-02-27T17:49:33.689Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-3104-feat-cookie-add-feature-support-for-cookie-secret-file.json b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-3104-feat-cookie-add-feature-support-for-cookie-secret-file.json new file mode 100644 index 00000000..48bc93b2 --- /dev/null +++ b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-3104-feat-cookie-add-feature-support-for-cookie-secret-file.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:42.536Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "oauth2-proxy: feat(cookie): add feature support for cookie-secret-file", + "description": "This PR adds a new command-line option `--cookie-secret-file` that allows users to specify a file path containing the cookie secret, similar to the existing `--client-secret-file` option. This enhancement provides a more secure way to handle cookie secrets, especially in containeri", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "I don't really understand why you want a separate file for the cookie secret? 
What's the issue with having it in the config file?\n\nAnd security best practices actually recommend storing crucial data inside environment variables and not storing them in files. Which is why I don't get the point of this PR", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "sandy@sandydembp oauth2-proxy % go test ./pkg/validation/cookie_test.go ./pkg/validation/cookie.go -v\r\n=== RUN TestValidateCookie\r\n=== RUN TestValidateCookie/with_valid_configuration\r\n=== RUN TestValidateCookie/with_no_cookie_secret\r\n=== RUN TestValidateCookie/with_an_invalid_cookie_secret\r\n=== RUN TestValidateCookie/with_a_valid_Base64_secret\r\n=== RUN TestValidateCookie/with_an_invalid_Base64_secret\r\n=== RUN TestValidateCookie/with_an_invalid_name\r\n=== RUN TestValidateCookie/with_a_name_that_is_too_long\r\n=== RUN TestValidateCookie/with_refresh_longer_than_expire\r\n=== RUN TestValidateCookie/with_samesite_\"none\"\r\n=== RUN TestValidateCookie/with_samesite_\"lax\"\r\n=== RUN TestValidateCookie/with_samesite_\"strict\"\r\n=== RUN TestValidateCookie/with_samesite_\"invalid\"\r\n=== RUN TestValidateCookie/with_a_combination_of_configuration_errors\r\n=== RUN TestValidateCookie/with_session_cookie_configuration\r\n=== RUN TestValidateCookie/with_valid_secret_file\r\n=== RUN TestValidateCookie/with_nonexistent_secret_file\r\n--- PASS: TestValidateCookie (0.00s)\r\n --- PASS: TestValidateCookie/with_valid_configuration (0.00s)\r\n --- PASS: TestValidateCookie/with_no_cookie_secret (0.00s)\r\n --- PASS: TestValidateCookie/with_an_invalid_cookie_secret (0.00s)\r\n --- PASS: TestValidateCookie/with_a_valid_Base64_secret (0.00s)\r\n --- PASS: TestValidateCookie/with_an_invalid_Base64_secret (0.00s)\r\n --- PASS: TestValidateCookie/with_an_invalid_name (0.00s)\r\n --- PASS: TestValidateCookie/with_a_name_that_is_too_long (0.00s)\r\n --- PASS: TestValidateCookie/with_refresh_longer_than_expire 
(0.00s)\r\n --- PASS: TestValidateCookie/with_samesite_\"none\" (0.00s)\r\n --- PASS: TestValidateCookie/with_samesite_\"lax\" (0.00s)\r\n --- PASS: TestValidateCookie/with_samesite_\"strict\" (0.00s)\r\n --- PASS: TestValidateCookie/with_samesite_\"invalid\" (0.00s)\r\n --- PASS: TestValidateCookie/with_a_combination_of_configuration_errors (0.00s)\r\n --- PASS: TestValidateCookie/with_session_cookie_configuration (0.00s)\r\n --- PASS: TestValidateCookie/with_valid_secret_file (0.00s)\r\n --- PASS: TestValidateCookie/with_nonexistent_secret_file (0.00s)\r\nPASS\r\nok command-line-arguments 0.208s" + ] + } + }, + "metadata": { + "tags": [ + "oauth2-proxy", + "sandbox", + "app-definition", + "docs", + "go", + "tests" + ], + "category": "workloads", + "cncfProjects": [ + "oauth2-proxy" + ], + "targetResourceKinds": [ + "Secret" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/oauth2-proxy/oauth2-proxy/pull/3104", + "sourceRepo": "oauth2-proxy/oauth2-proxy", + "reactions": 8, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:49:42.537Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-469-add-user-id-claim-to-support-other-claims-than-email.json b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-469-add-user-id-claim-to-support-other-claims-than-email.json new file mode 100644 index 00000000..cd9bcb98 --- /dev/null +++ b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-469-add-user-id-claim-to-support-other-claims-than-email.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:51.158Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "oauth2-proxy: Add -user-id-claim to support other claims than email", + "description": "- Add the option `user-id-claim` (defaults to email)\n- OIDC extracts this claim into session.Email (to be renamed 
later)\n- providers: add `CreateSessionStateFromBearerToken` with a default impl taken from\n `GetJwtSession` and overridden by oidc to respect `user-id-claim`\n\nOnce #466 is merged, I can continue to port other work from #448, namely to rename SessionState.Email to .UserID, adjust (de)seriali", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "### Commits\n\nNOTE: I split the change into multiple commits to make review easier but expect them to be squashed together upon merge. The commits are (oldest first):\n1. \"WIP Add -user-id-claim ...\" - add the config option and change the oidc provider to use it\n2. \"Continued: Rename SessionState.Email to UserID\" - a simple refactoring that touches many places. I also add `UserIDType` and set it as appropriate, add `LegacyEmail` and use it to decode sessions stored prior to this change.\n3. \"Fix uses of UserID x Email\": Differentiate behavior (primarily verification) based of whether `UserIDType` is email or not\n4. \"Fix GetJwtSession to respect user-id-claim\"\n\n### Changes so far\n\n1. Add a new list option, `-user-id-claim` (defaults to `[\"email\"]`)\n2. The OIDC provider uses it to extract the first present claim into the newly named `Session.UserID`, in order, and only fails if none of the requested user ID claims is present (note: I believe it is not useful for any other, more speciali", + "steps": [ + "\"WIP Add -user-id-claim ...\" - add the config option and change the oidc provider to use it", + "\"Continued: Rename SessionState.Email to UserID\" - a simple refactoring that touches many places. 
I also add `UserIDType` and set it as appropriate, add `LegacyEmail` and use it to decode sessions stored prior to this change.", + "\"Fix uses of UserID x Email\": Differentiate behavior (primarily verification) based of whether `UserIDType` is email or not", + "\"Fix GetJwtSession to respect user-id-claim\"", + "Add a new list option, `-user-id-claim` (defaults to `[\"email\"]`)", + "The OIDC provider uses it to extract the first present claim into the newly named `Session.UserID`, in order, and only fails if none of the requested user ID claims is present (note: I believe it is not useful for any other, more speciali" + ] + } + }, + "metadata": { + "tags": [ + "oauth2-proxy", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "oauth2-proxy" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/oauth2-proxy/oauth2-proxy/pull/469", + "sourceRepo": "oauth2-proxy/oauth2-proxy", + "reactions": 3, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:49:51.158Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-762-support-traefik-forwardauth.json b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-762-support-traefik-forwardauth.json new file mode 100644 index 00000000..915e415c --- /dev/null +++ b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-762-support-traefik-forwardauth.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:45.872Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "oauth2-proxy: Support traefik forwardAuth", + "description": "Add support for traefiks X-Forwarded headers:\r\n- X-Forwarded-Proto\r\n- X-Forwarded-Host\r\n- X-Forwarded-Uri\r\nUse them to construct a redirect uri, when X-Auth-Request-Redirect\r\nis not present or valid.\r\n\r\nUse one endpoint for 
auth and sign_in, which is the way\r\nthomseddon/traefik-forward-auth works.\r\nThis also allows to skip the auth provider button.\r\n\r\nFixes #46\r\n\r\n\r\n\r\n## Description\r\n\r\n", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Reverse proxies like Traefik currently don't have a dynamic way to manipulate headers, thus handling redirects is not easy.\n\nOn Traefik v1, you could set the `rd` query string _per frontend_, thus allowing more customi", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "http:\r\n routers:\r\n a-service:\r\n rule: \"Host(`a-service.example.com`)\"\r\n service: a-service-backend\r\n middlewares:\r\n - oauth-errors\r\n - oauth-auth\r\n tls:\r\n certResolver: default\r\n domains:\r\n - main: \"example.com\"\r\n sans:\r\n - \"*.example.com\"\r\n oauth:\r\n rule: \"Host(`a-service.example.com`, `oauth.example.com`) && PathPrefix(`/oauth2/`)\"\r\n middlewares:\r\n - auth-headers\r\n service: oauth-backend\r\n tls:\r\n certResolver: default\r\n domains:\r\n - main: \"example.com\"\r\n sans:\r\n - \"*.example.com\"\r\n\r\n services:\r\n a-service-backend:\r\n loadBalancer:\r\n servers:\r\n - url: http://172.16.0.2:7555\r\n oauth-backend:\r\n loadBalancer:\r\n servers:\r\n - url: http://172.16.0.1:4180\r\n\r\n middlewares:\r\n auth-headers:\r\n headers:\r\n sslRedirect: true\r\n stsSeconds: 315360000\r\n browserXssFilter: true\r\n contentTypeNosniff: true\r\n forceSTSHeader: true\r\n sslHost: example.com\r\n stsIncludeSubdomains: true\r\n stsPreload: true\r\n frameDeny: true\r\n oauth-auth:\r\n forwardAuth:\r\n address: https://oauth.example.com/oauth2/auth\r\n trustForwardHeader: true\r\n oauth-errors:\r\n errors:\r\n status:\r\n - \"401-403\"\r\n service: oauth-backend\r\n query: \"/oauth2/sign_in\"" + ] + } + }, + "metadata": { + "tags": [ + "oauth2-proxy", + "sandbox", + "app-definition" + ], + "category": "workloads", + 
"cncfProjects": [ + "oauth2-proxy" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/oauth2-proxy/oauth2-proxy/pull/762", + "sourceRepo": "oauth2-proxy/oauth2-proxy", + "reactions": 5, + "comments": 15 + }, + "security": { + "scannedAt": "2026-02-27T17:49:45.872Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-914-extract-email-from-id-token-for-azure-provider.json b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-914-extract-email-from-id-token-for-azure-provider.json new file mode 100644 index 00000000..e327026d --- /dev/null +++ b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-914-extract-email-from-id-token-for-azure-provider.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:47.225Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "oauth2-proxy: extract email from id_token for azure provider", + "description": "this change fixes a bug when `--resource` is specified with non-Graph api and the access token destined to --resource is used to call Graph api", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "this will address #779 as well", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "oauth2-proxy", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "oauth2-proxy" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/oauth2-proxy/oauth2-proxy/pull/914", + "sourceRepo": "oauth2-proxy/oauth2-proxy", + "reactions": 4, + "comments": 34 + }, + "security": { + "scannedAt": "2026-02-27T17:49:47.225Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git 
a/solutions/cncf-generated/open-cluster-management/open-cluster-management-220-seedling-adding-contextual-logging-in-registration-c.json b/solutions/cncf-generated/open-cluster-management/open-cluster-management-220-seedling-adding-contextual-logging-in-registration-c.json new file mode 100644 index 00000000..feb8a91f --- /dev/null +++ b/solutions/cncf-generated/open-cluster-management/open-cluster-management-220-seedling-adding-contextual-logging-in-registration-c.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:55.002Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "open-cluster-management: :seedling: adding contextual logging in registration component", + "description": "## Summary\nAdding contextual logging \n## Related issue(s)\n\nFixes #191 \n\nFor More Info:\nhttps://www.kubernetes.dev/blog/2022/05/25/contextual-logging/\nhttps://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/ok-to-test", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "open-cluster-management", + "sandbox", + "app-definition", + "lgtm", + "approved", + "ok-to-test" + ], + "category": "workloads", + "cncfProjects": [ + "open-cluster-management" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/open-cluster-management-io/ocm/pull/220", + "sourceRepo": "open-cluster-management-io/ocm", + "reactions": 0, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:49:55.002Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git 
a/solutions/cncf-generated/open-policy-agent-opa-/open-policy-agent-opa-4029-implement-distributed-tracing-using-opentelemtry.json b/solutions/cncf-generated/open-policy-agent-opa-/open-policy-agent-opa-4029-implement-distributed-tracing-using-opentelemtry.json new file mode 100644 index 00000000..b19c305e --- /dev/null +++ b/solutions/cncf-generated/open-policy-agent-opa-/open-policy-agent-opa-4029-implement-distributed-tracing-using-opentelemtry.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:48.565Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "open-policy-agent-opa-: Implement Distributed tracing using OpenTelemtry", + "description": "This commit implements tracing using the net/http [automatic instrumentation wrappers](https://opentelemetry.io/docs/go/instrumentation/#automatic-instrumentation) on the server and topdown/http packages.\n\nFollowing configuration flags are added:\n --distributed-tracing enable distributed tracing using OpenTelemetry Tracing\n --distributed-tracing-address string address of the OpenTelemetry Collector gRPC endpoint (default \"localhost:4317\")\n --distributed-tracing-sample-rate int precentage of traces that are sampled and exported (default 100)\n --distributed-tracing-service-name string logical name of the service used by OpenTelemetry Tracing (default \"opa\")\n --distributed-tracing-tls {off,tls,mtls} set distributed tracing tls scheme (default off)\n --distributed-tracing-tls-cert-file string set path of TLS certificate file of the tracing exporter\n --distributed-tracing-tls-private-key-file string ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Love this! 👍 \n\nOne thing I'm not too sure about though is all the new arguments added to `opa run`. Not in the sense that they aren't necessary, but I wonder if it'd be a good idea to instead have them added to the OPA configuration? 
In addition to adding clutter to the `opa run --help` output, command line arguments have some disadvantages over config, including:\n\n1. These attributes aren't available for review at the `/v1/config` endpoint.\n2. They can't be set/modified by discovery bundles.\n3. They can't be set programmatically / by the SDK (not sure if applicable here though).\n\nWe've already seen this being problematic [elsewhere](https://github.com/open-policy-agent/opa/issues/3980), so perhaps this would be a good candidate for config rather than CLI arguments? Note that it's still possible to set config attributes through the CLI with `--set config=value` if one prefers that.\n\nThoughts, @srenatus @tsandall ?", + "steps": [ + "These attributes aren't available for review at the `/v1/config` endpoint.", + "They can't be set/modified by discovery bundles.", + "They can't be set programmatically / by the SDK (not sure if applicable here though)." + ], + "codeSnippets": [ + "{\"client_addr\":\"127.0.0.1:53744\",\"level\":\"info\",\"msg\":\"Received request.\",\"req_id\":12,\"req_method\":\"GET\",\"req_path\":\"/health\",\"time\":\"2021-12-10T11:17:56+01:00\"}\r\n{\"client_addr\":\"127.0.0.1:53744\",\"level\":\"info\",\"msg\":\"Sent response.\",\"req_id\":12,\"req_method\":\"GET\",\"req_path\":\"/health\",\"resp_bytes\":2,\"resp_duration\":0.280679,\"resp_status\":200,\"time\":\"2021-12-10T11:17:56+01:00\"}\r\n{\"level\":\"error\",\"msg\":\"Distributed tracing: context deadline exceeded\",\"time\":\"2021-12-10T11:18:04+01:00\"}\r\n{\"level\":\"error\",\"msg\":\"Distributed tracing: max retry time elapsed: rpc error: code = Unavailable desc = connection error: desc = \\\"transport: Error while dialing dial tcp 127.0.0.1:4317: connect: connection refused\\\"\",\"time\":\"2021-12-10T11:18:09+01:00\"}" + ] + } + }, + "metadata": { + "tags": [ + "open-policy-agent-opa-", + "graduated", + "security" + ], + "category": "security", + "cncfProjects": [ + "open-policy-agent-opa-" + ], + 
"targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/open-policy-agent/opa/pull/4029", + "sourceRepo": "open-policy-agent/opa", + "reactions": 3, + "comments": 20 + }, + "security": { + "scannedAt": "2026-02-27T17:44:48.565Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/open-policy-agent-opa-/open-policy-agent-opa-4254-ci-publish-multi-arch-image-manifest-lists.json b/solutions/cncf-generated/open-policy-agent-opa-/open-policy-agent-opa-4254-ci-publish-multi-arch-image-manifest-lists.json new file mode 100644 index 00000000..9c5b2d8b --- /dev/null +++ b/solutions/cncf-generated/open-policy-agent-opa-/open-policy-agent-opa-4254-ci-publish-multi-arch-image-manifest-lists.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:44:47.408Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "open-policy-agent-opa-: ci: publish multi-arch image manifest lists", + "description": "This change adds linux/arm64 binaries to the release. It also publishes an arm64 container image for all variants (standard, debug, rootless, static) and releases (dev, edge, latest).\n\nThe build and push process uses buildx to push the individual images by digest (i.e. untagged) and reference them in a single, tagged manifest list. 
This avoids cluttering Docker Hub's tag list with `-` tags.\n\nFixes #2233", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "With this change, we're dialling back #4254 a little when it comes to linux/arm64:\n\n- The built binaries only include a static linux_arm64:\n\n ```\n Archive: binaries.zip\n creating: 0.37.0-dev/\n inflating: 0.37.0-dev/opa_darwin_amd64.sha256\n inflating: 0.37.0-dev/opa_darwin_amd64\n inflating: 0.37.0-dev/opa_darwin_arm64_static.sha256\n inflating: 0.37.0-dev/opa_darwin_arm64_static\n inflating: 0.37.0-dev/opa_linux_amd64.sha256\n inflating: 0.37.0-dev/opa_linux_amd64\n inflating: 0.37.0-dev/opa_linux_amd64_static.sha256\n inflating: 0.37.0-dev/opa_linux_amd64_static\n inflating: 0.37.0-dev/opa_linux_arm64_static.sha256\n inflating: 0.37.0-dev/opa_linux_arm64_static\n inflating: 0.37.0-dev/opa_windows_amd64.exe.sha256\n inflating: 0.37.0-dev/opa_windows_amd64.exe\n ```\n- The docker manifests for the non-static tags (version, version-debug, version-rootless) only contain the linux/amd64 platforms\n- The docker manifest for the static tag (version-static) contains both linux/amd64 and linux/arm64\n- Smoke tests for our docker images only test the images we build\n\nFixes #4280.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Archive: binaries.zip\r\n creating: 0.37.0-dev/\r\n inflating: 0.37.0-dev/opa_darwin_amd64.sha256\r\n inflating: 0.37.0-dev/opa_darwin_amd64\r\n inflating: 0.37.0-dev/opa_darwin_arm64_static.sha256\r\n inflating: 0.37.0-dev/opa_darwin_arm64_static\r\n inflating: 0.37.0-dev/opa_linux_amd64.sha256\r\n inflating: 0.37.0-dev/opa_linux_amd64\r\n inflating: 0.37.0-dev/opa_linux_amd64_static.sha256\r\n inflating: 0.37.0-dev/opa_linux_amd64_static\r\n inflating: 0.37.0-dev/opa_linux_arm64_static.sha256\r\n inflating: 0.37.0-dev/opa_linux_arm64_static\r\n inflating: 
0.37.0-dev/opa_windows_amd64.exe.sha256\r\n inflating: 0.37.0-dev/opa_windows_amd64.exe", + "WARNING: The requested image's platform (linux/amd64) does not match the detected host platform (linux/arm64/v8) and no specific platform was requested" + ] + } + }, + "metadata": { + "tags": [ + "open-policy-agent-opa-", + "graduated", + "security" + ], + "category": "security", + "cncfProjects": [ + "open-policy-agent-opa-" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/open-policy-agent/opa/pull/4254", + "sourceRepo": "open-policy-agent/opa", + "reactions": 8, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:44:47.408Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/opencost/opencost-1430-added-current-cluster-id-filter.json b/solutions/cncf-generated/opencost/opencost-1430-added-current-cluster-id-filter.json new file mode 100644 index 00000000..bfc01b0a --- /dev/null +++ b/solutions/cncf-generated/opencost/opencost-1430-added-current-cluster-id-filter.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:21.854Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "opencost: added current cluster id filter", + "description": "Fixes #1431\n## What does this PR change?\nIntroduces `CURRENT_CLUSTER_ID_FILTER_ENABLED` env variable which adds `=` filter to each prometheus query. It allows using shared prometheus for multiple Kubernetes clusters\n\n## Does this PR relate to any other PRs?\n* No\n\n## How will this PR impact users?\n* It just adds optional variable which by default doesn't change existing functionality\n\n## How was this PR tested?\n* It was tested in my EKS cluster with `CURRENT_CLUSTER_ID_FILTER_ENABLED` set to `false`, `true` and unset. 
Used UI for testing, so it means that tested `/allocation` endpoint only\n\n## Does this PR require changes to documentation?\n* `CURRENT_CLUSTER_ID_FILTER_ENABLED` variable can be added to documentation\n\n## Have you labeled this PR and its corresponding Issue as \"next release\" if it should be part of the next Opencost release? If not, why not?\n* Do not know which labels do you use", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Hi @AndrewChubatiuk sorry for the late response here, we were at Kubecon. In general I'm supportive of these changes. Could you add a few more notes under \"testing\"? For example, was this tested successfully when no clusterID was specified as well as when one was? If so what endpoints were used to test?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "opencost", + "incubating", + "observability" + ], + "category": "observability", + "cncfProjects": [ + "opencost" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/opencost/opencost/pull/1430", + "sourceRepo": "opencost/opencost", + "reactions": 1, + "comments": 23 + }, + "security": { + "scannedAt": "2026-02-27T17:46:21.855Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/opencost/opencost-2317-cloud-costs-kubernetesless.json b/solutions/cncf-generated/opencost/opencost-2317-cloud-costs-kubernetesless.json new file mode 100644 index 00000000..6ecdde13 --- /dev/null +++ b/solutions/cncf-generated/opencost/opencost-2317-cloud-costs-kubernetesless.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:20.755Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "opencost: Cloud Costs Kubernetesless", + "description": "## What does this PR 
change?\n* With the addition of the `KUBERNETES_ENABLED` and `CLOUD_COST_CONFIG_PATH` environment variables, you can now run OpenCost without Kubernetes (or Prometheus). The [/cloudCosts API](https://www.opencost.io/docs/integrations/api#cloudcost) is available, which means you could use this as a stand-alone API to your cloud provider billing.\n\n## Does this PR relate to any other PRs?\nno\n\n## How will this PR impact users?\n* Users will be able to use the existing OCI image provided for Kubernetes deployments and simply pass new environment variables to it within Docker to run it wherever they want outside of Kubernetes. They could also build it on their workstation and point directly to the API.\n\n`KUBERNETES_ENABLED=false CLOUD_COST_ENABLED=true CLOUD_COST_CONFIG_PATH='cloud-costs.json' ./costmodel`\n\nand hit the API with\n\n`curl -G http://localhost:9003/cloudCost -d window=7d -d aggregate=provider | jq`\n\n## Does this PR address any GitHub or Zendesk issues?\n* Closes ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## What does this PR change?\n* The API port and server and the UI port are all now configurable. The API port can be moved off of 9003 and the UI can be moved off of 9090 through environment variables. 
If you need to modify the API server in the `default.nginx.conf` you can set the `API_SERVER`.\n\n## Does this PR relate to any other PRs?\n* Continues the work started in https://github.com/opencost/opencost/pull/2283\n* Enabled by https://github.com/opencost/opencost-helm-chart/pull/156\n\n## How will this PR impact users?\n* Ports are now configurable\n\n## Does this PR address any GitHub or Zendesk issues?\n* Closes https://github.com/opencost/opencost/issues/2223\n* https://github.com/opencost/opencost/pull/2317 needs this to work\n\n## How was this PR tested?\n* Tested as a standard build with an unaware Helm chart\n* Tested with the updated Helm chart with no changes\n* Tested with the updated Helm chart with changed API and UI ports\n* Verified with port-forwarding 9091 and 9004\n* Test added to the code for the API_PORT\n\n## Does this PR require changes to documentation?\n* It will be part of the \"running with Docker\" documentation to be added eventually\n\n## Have you labeled this PR and its corresponding Issue as \"next release\" if it should be part of the next OpenCost release? If not, why not?\n* Yes, if we want to be able to ship the Docker version of OpenCost", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "2023-11-17T15:28:06.366263035Z ??? 
Log level set to info\r\n2023-11-17T15:28:06.366467786Z INF Starting cost-model version dev (HEAD)\r\n2023-11-17T15:28:06.36652612Z INF Kubernetes enabled: true\r\n...", + "docker run -e API_SERVER=host.docker.internal -p 9090:9090 -t mattray/opencost-ui:dev01" + ] + } + }, + "metadata": { + "tags": [ + "opencost", + "incubating", + "observability", + "next-release", + "p2", + "e2", + "needs-follow-up" + ], + "category": "observability", + "cncfProjects": [ + "opencost" + ], + "targetResourceKinds": [ + "Deployment" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/opencost/opencost/pull/2317", + "sourceRepo": "opencost/opencost", + "reactions": 3, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:46:20.756Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/opencost/opencost-2423-use-node-label-instead-of-extracting-from-instance.json b/solutions/cncf-generated/opencost/opencost-2423-use-node-label-instead-of-extracting-from-instance.json new file mode 100644 index 00000000..4ca5f780 --- /dev/null +++ b/solutions/cncf-generated/opencost/opencost-2423-use-node-label-instead-of-extracting-from-instance.json @@ -0,0 +1,56 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:19.094Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "opencost: Use node label instead of extracting from instance", + "description": "Given a common cAdvisor setup the instance label will refer to the node by IP rather than hostname\nThis uses the `node` label instead, relying on a setup where that label is properly configured rather than an unexpected `instance` setup.\n\n## What does this PR change?\n* Switches the querying to the `node` label instead of relying on users configuring prometheus to add the node to the `instance` hostname\n\n## How will this PR impact users?\n* Common monitoring setups will no 
longer break opencost (this also affects kubecost)\n* If you're configuring prometheus directly you need the `relabel_configs` from below\n* If you're using `ServiceMonitors` you need to make certain you have `honorLabels: true`\n\n## Does this PR address any GitHub or Zendesk issues?\n* Closes #2281\n\n## How was this PR tested?\n* On a local cluster\n* In an IDE debugging\n* (metrics breaking kubecost was confirmed in local cluster as well)\n\n## Does this PR require changes to documentation?\nYes, the following `relabel_configs`", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## What does this PR change?\nThis is a companion to https://github.com/opencost/opencost/pull/2423\n\n## Does this PR rely on any other PRs?\nWithout the changes from [opencost/2423](https://github.com/opencost/opencost/pull/2423) this _will_ break queries in kubecost.\n\n## How does this PR impact users? (This is the kind of thing that goes in release notes!)\nIf you've setup your own `relabel_configs` you need to add the following configuration manually:\n```yaml\n - action: labelmap\n regex: __meta_kubernetes_node_name\n replacement: node\n```\n\n## Links to Issues or tickets this PR addresses or fixes\n\n## What risks are associated with merging this PR? What is required to fully test this PR?\nWithout the changes from the linked PR some queries will stop working, this should happen immediately and get caught by tests\n\n## How was this PR tested?\n\n## Have you made an update to documentation? 
If so, please provide the corresponding PR.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "relabel_configs:\r\n- action: labelmap\r\n regex: __meta_kubernetes_node_name\r\n replacement: node", + "- action: labelmap\r\n regex: __meta_kubernetes_node_name\r\n replacement: node", + "- target_label: __address__\r\n replacement: kubernetes.default.svc:443" + ] + } + }, + "metadata": { + "tags": [ + "opencost", + "incubating", + "observability", + "p2", + "kubecost", + "e2", + "needs-follow-up", + "jiracreated", + "1-115" + ], + "category": "observability", + "cncfProjects": [ + "opencost" + ], + "targetResourceKinds": [ + "Service", + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/opencost/opencost/pull/2423", + "sourceRepo": "opencost/opencost", + "reactions": 5, + "comments": 30 + }, + "security": { + "scannedAt": "2026-02-27T17:46:19.094Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/openfeature/openfeature-119-remove-context-transformers-add-provider-hooks.json b/solutions/cncf-generated/openfeature/openfeature-119-remove-context-transformers-add-provider-hooks.json new file mode 100644 index 00000000..18f7e284 --- /dev/null +++ b/solutions/cncf-generated/openfeature/openfeature-119-remove-context-transformers-add-provider-hooks.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:24.217Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "openfeature: Remove context transformers. 
Add provider hooks", + "description": "refs https://github.com/open-feature/research/pull/22", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Hey @justinabrahms, could you please signoff your commit?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "Hooks MUST be evaluated in the following order: - before: API, Client, Invocation, Provider - after: Provider, Invocation," + ] + } + }, + "metadata": { + "tags": [ + "openfeature", + "incubating", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "openfeature" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/open-feature/spec/pull/119", + "sourceRepo": "open-feature/spec", + "reactions": 2, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:46:24.217Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/openfeature/openfeature-227-feat-context-propagation.json b/solutions/cncf-generated/openfeature/openfeature-227-feat-context-propagation.json new file mode 100644 index 00000000..3431372c --- /dev/null +++ b/solutions/cncf-generated/openfeature/openfeature-227-feat-context-propagation.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:26.824Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "openfeature: feat: context propagation", + "description": "## This PR\n\nAdds context propagation as described in #81.\n\nTo me it looks like this should be implementable in all the current languages.\n\nThe following things I am not sure on: \n- Is this concrete enough?\n- Should 3.2 and 3.3 be switched around as 3.3 is assumed in 3.2?\n- Should we nest the requirements more e.g. 
do we want to have a specific section for the propagator?\n\n### Related Issues\n\nFixes #81", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Hey @lukas-reining - sorry for a slow response from my part on this. I will be reviewing this first thing on Monday.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "With the resulting span looking something like:", + "or if the caller to `someFunc` passed a `ctx` that already contained a span:", + "## Related\r\n\r\n- [`trace/context.go`](https://github.com/open-telemetry/opentelemetry-go/blob/33f5cf460bc0a2e4648e5f3fd8f2e4faa79ddb0e/trace/context.go)" + ] + } + }, + "metadata": { + "tags": [ + "openfeature", + "incubating", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "openfeature" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/open-feature/spec/pull/227", + "sourceRepo": "open-feature/spec", + "reactions": 2, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:46:26.824Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/openfeature/openfeature-229-feat-add-domain-as-an-openfeature-concept.json b/solutions/cncf-generated/openfeature/openfeature-229-feat-add-domain-as-an-openfeature-concept.json new file mode 100644 index 00000000..4c22e8c3 --- /dev/null +++ b/solutions/cncf-generated/openfeature/openfeature-229-feat-add-domain-as-an-openfeature-concept.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:25.834Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "openfeature: feat: add domain as an openfeature concept", + "description": "## This PR\n\n- defines `domain` as a concept\n- updates references to client name to use `domain`\n- add 
`domain` as client metadata\n\n### Related Issues\n\nFixes #228\n\n### Follow-up Tasks\n\n- Update the readme template\n- Update the openfeature.dev SDK compatibility table", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## This PR\n\n- overloads the set provider methods to support defining context\n- updates to the server and client readmes\n\n### Related Issues\n\nFixes #748\n\n### Notes\n\nI had to move the logic outside of the abstract class because the client supports named client context, which isn't available in the abstract. I also have to define the overloads on each implementation (web, server) in order for intellisense to work properly.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "You rather want:" + ] + } + }, + "metadata": { + "tags": [ + "openfeature", + "incubating", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "openfeature" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/open-feature/spec/pull/229", + "sourceRepo": "open-feature/spec", + "reactions": 2, + "comments": 19 + }, + "security": { + "scannedAt": "2026-02-27T17:46:25.835Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/openfga/openfga-1015-chore-add-docker-compose-override-file-for-local-development.json b/solutions/cncf-generated/openfga/openfga-1015-chore-add-docker-compose-override-file-for-local-development.json new file mode 100644 index 00000000..6ed6a289 --- /dev/null +++ b/solutions/cncf-generated/openfga/openfga-1015-chore-add-docker-compose-override-file-for-local-development.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:30.912Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "openfga: chore: add 
docker compose override file for local development", + "description": "Adds a `docker-compose.override` file that can be used in conjunction with the existing `docker-compose.yaml` file for applying local development specific build instructions and configuration for services.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "I think the biggest difference when using an override file is that there's no way to skip the overriding. Why not just modify the current docker-compose.yaml file to not use the latest image? Is the intent of the docker-compose setup to get started quick with the latest stable version or with the current version in the filesystem?", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "[see 2 files with indirect coverage changes](https://app.codecov.io/gh/openfga/openfga/pull/1015/indirect-changes?src=pr&el=tree-more&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=openfga)\n\n\n
\n\n[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/openfga/openfga/pull/1015?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=openfga). \n:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=openfga).\n\n> @sanketrai1 May I ask if you can add a third option to the \"Build from Source\" documentation in the README (we can call it: build using docker-compose) which documents how to build it for people other than us who may want to follow?\r\n> \r\n> e.g. `docker compose up -f ... -f ...`\r\n\r\nFrom my understanding, and it appears to work this way now as well, Docker looks for `docker-compose.yml` and `docker-compose.override.yml` by default. So nothing changes with the existing documentation around `docker compose up`.\n@rhamzeh, @jon-whit is correct. If `docker-compose.override.yml` is present, then `docker-compose.yml` and `docker-compose.override.yml` are merged automatically when running `docker compose up`. If a compose override file with any other name than `docker-compose.override.yml` or `compose.override.yml` needs to be merged with the default compose file, only then multiple `-f` arguments syntax is needed.\n@sanketrai1 could you please rebase on `main`?\nI think the biggest difference when using an override file is that there's no way to skip the overriding. Why not just modify the current docker-compose.yaml file to not use the latest image? Is the intent of the docker-compose setup to get started quick with the latest stable version or with the current version in the filesystem?\n> I think the biggest difference when using an override file is that there's no way to skip the overriding. Why not just modify the current docker-compose.yaml file to not use the latest image? 
Is the intent of the docker-compose setup to get started quick with the latest stable version or with the current version in the filesystem?\r\n\r\nI think ideally the main `docker-compose.yaml` file would be as close to get set up and started with minimal effort.\r\n\r\nIt's what we can point users to to copy and get started w/ OpenFGA and also probably replace the docker-compose snippet in [docs](https://openfga.dev/docs/getting-started/setup-openfga/docker#postgres) so that we do not need to maintain two separate places.\r\n\r\nIn our documentation we can then say \"copy the following snippet to get started quickly\":", + "With a few redirects, this can be simplified to:" + ] + } + }, + "metadata": { + "tags": [ + "openfga", + "incubating", + "security" + ], + "category": "security", + "cncfProjects": [ + "openfga" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/openfga/openfga/pull/1015", + "sourceRepo": "openfga/openfga", + "reactions": 1, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:46:30.912Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/openfga/openfga-2479-feat-support-separate-read-and-write-datastores-for-postgresql.json b/solutions/cncf-generated/openfga/openfga-2479-feat-support-separate-read-and-write-datastores-for-postgresql.json new file mode 100644 index 00000000..d4d66e07 --- /dev/null +++ b/solutions/cncf-generated/openfga/openfga-2479-feat-support-separate-read-and-write-datastores-for-postgresql.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:29.823Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "openfga: feat: support separate read and write datastores for postgresql", + "description": "support separate read and write datastores for postgresql improved performance and 
scalability\n\nThis change introduces the ability to configure separate read and write datastores in the application, specifically targeting PostgreSQL setups. This enhancement is aimed at improving performance and scalability by distributing read and write operations across different databases.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Introduced detailed guidance on configuring OpenFGA with PostgreSQL read replicas to enhance performance and scalability. This update includes instructions for setting up primary and secondary datastores, considerations for synchronous vs. asynchronous replication, and best practices for ensuring data consistency and monitoring replication lag.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "
\n\n[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/openfga/openfga/pull/2479?dropdown=coverage&src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=openfga). \n:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=openfga).\n\n
:rocket: New features to boost your workflow: \n\n- :snowflake: [Test Analytics](https://docs.codecov.com/docs/test-analytics): Detect flaky tests, report on failures, and find test suite problems.\n
\n@DanCech PR updated \nHmm, I haven't run into the go-bench issue before, but testing locally (`go test -bench=BenchmarkOpenFGAServer/BenchmarkPostgresDatastore/BenchmarkCheck/with_complex_ttu ./pkg/server`) I am seeing that it's ~5x slower (660ms vs 115ms) than main so we'll need to figure out what's going on there.\r\n\r\nI was able to get some clean profiles by running" + ] + } + }, + "metadata": { + "tags": [ + "openfga", + "incubating", + "security" + ], + "category": "security", + "cncfProjects": [ + "openfga" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/openfga/openfga/pull/2479", + "sourceRepo": "openfga/openfga", + "reactions": 7, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:46:29.824Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/openkruise/openkruise-1336-docs-add-in-place-workload-vertical-scaling-proposal.json b/solutions/cncf-generated/openkruise/openkruise-1336-docs-add-in-place-workload-vertical-scaling-proposal.json new file mode 100644 index 00000000..fa4a59e0 --- /dev/null +++ b/solutions/cncf-generated/openkruise/openkruise-1336-docs-add-in-place-workload-vertical-scaling-proposal.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:33.159Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "openkruise: docs: add in-place workload vertical scaling proposal", + "description": "### Describe what this PR does\nAdd a proposal for In-place Workload Vertical Scaling.\n\nFixes #1212", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "## Sonatype Lift is retiring\nSonatype Lift will be retiring on Sep 12, 2023, with its analysis stopping on Aug 12, 2023. 
We understand that this news may come as a disappointment, and Sonatype is committed to helping you transition off it seamlessly. If you’d like to retain your data, please export your issues from the web console.\nWe are extremely grateful and thank you for your support over the years.\n\n[📖 Read about the impacts and timeline](https://www.sonatype.com/lift-retirement)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "openkruise", + "incubating", + "app-definition", + "wontfix", + "do-not-merge-hold", + "size-xl" + ], + "category": "workloads", + "cncfProjects": [ + "openkruise" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/openkruise/kruise/pull/1336", + "sourceRepo": "openkruise/kruise", + "reactions": 5, + "comments": 20 + }, + "security": { + "scannedAt": "2026-02-27T17:46:33.160Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/opentelemetry/opentelemetry-1431-new-meeting-recording-workflow.json b/solutions/cncf-generated/opentelemetry/opentelemetry-1431-new-meeting-recording-workflow.json new file mode 100644 index 00000000..36cd5034 --- /dev/null +++ b/solutions/cncf-generated/opentelemetry/opentelemetry-1431-new-meeting-recording-workflow.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:35.888Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "opentelemetry: New meeting recording workflow", + "description": "Resolves #1406\nResolves #964\nResolves #935\nResolves #863\nResolves #715\nResolves #451\n\nNew meeting workflow is to no longer upload meetings to YouTube, but instead to make Zoom cloud recordings available via Google spreadsheet using Zapier 
integration:\n\n![image](https://user-images.githubusercontent.com/218610/231894712-fd46d01c-0661-49dd-984c-44f9b5201b8f.png)\n\nHere's an example recording: https://zoom.us/rec/share/6VEDYb5zV-hDAT-mDr5W_A9FDYrE6WyTeWMRWz8xqKgJKOypdEwr9rDDerW2xMDj.zz6qA84Yxqces8s4\n\nNote that this includes Zoom chat messages which YouTube videos were previously missing.\n\nThe naming relies on splitting out separate Zoom ID per meeting series, which is documented in #1415.\n\nI verified that all Zoom cloud recordings exist back to the beginning of the project, so expiring too soon doesn't appear to be a problem, though we may still wish to expire them (deferring this issue to #562).\n\nNote that this Google spreadsheet only contains meeting recordings starting from 4/10/2023 (w", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "The old YouTube link send people to a page with no recordings, and the entire archive appears to be gone.\n\nThis doc update reflects proposal here: https://github.com/open-telemetry/community/pull/1431/", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "opentelemetry", + "incubating", + "observability" + ], + "category": "observability", + "cncfProjects": [ + "opentelemetry" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/open-telemetry/community/pull/1431", + "sourceRepo": "open-telemetry/community", + "reactions": 4, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:46:35.888Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/opentelemetry/opentelemetry-1687-proposal-for-sig-opentelemetry-on-mainframe.json b/solutions/cncf-generated/opentelemetry/opentelemetry-1687-proposal-for-sig-opentelemetry-on-mainframe.json new file mode 100644 index 00000000..fc379668 --- /dev/null +++ 
b/solutions/cncf-generated/opentelemetry/opentelemetry-1687-proposal-for-sig-opentelemetry-on-mainframe.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:37.111Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "opentelemetry: Proposal for SIG OpenTelemetry on Mainframe", + "description": "This is the proposal for a work group and project focusing on enhancing OpenTelemetry to support the Mainframe. For details, please see the proposal in projects/mainframe.md.\n\nResolves #1686.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "I confirm to contribute to this SIG.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "opentelemetry", + "incubating", + "observability", + "area-project-proposal" + ], + "category": "observability", + "cncfProjects": [ + "opentelemetry" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/open-telemetry/community/pull/1687", + "sourceRepo": "open-telemetry/community", + "reactions": 2, + "comments": 26 + }, + "security": { + "scannedAt": "2026-02-27T17:46:37.112Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/opentelemetry/opentelemetry-2018-sig-proposal-for-sustainability-metrics.json b/solutions/cncf-generated/opentelemetry/opentelemetry-2018-sig-proposal-for-sustainability-metrics.json new file mode 100644 index 00000000..7a5f672f --- /dev/null +++ b/solutions/cncf-generated/opentelemetry/opentelemetry-2018-sig-proposal-for-sustainability-metrics.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:38.313Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "opentelemetry: SIG Proposal for 
Sustainability Metrics", + "description": "This proposes a project and working group for establishing Semantic Conventions for sustainability. \n\nWe aim to unify and and establish standards for monitoring sustainability metrics\n\nResolves #2020", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "As discussed during today's GC triage, @danielgblanco will be a GC sponsor, and @pyohannes is the community sponsor delegated by me. I tried to make the required changes to this PR, but it looks like I can't. Here's the proposed diff:\n\n```diff\ndiff --git a/projects/env-semconv.md b/projects/env-semconv.md\nindex 7e230e2..58aa81a 100644\n--- a/projects/env-semconv.md\n+++ b/projects/env-semconv.md\n@@ -24,8 +24,8 @@ We want to have a unified conventions in order to better facilitate measuring su\n - @spazzy757\n \n **Sponsoring Members**\n-- @jpkrohling \n-- @pyohannes \n+- @danielgblanco\n+- @pyohannes (delegated by @jpkrohling, as per [project proposal guidelines](https://github.com/open-telemetry/community/blob/main/project-management.md#project-proposal))\n \n **Engineers**\n - @gabibeyer\n ```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "opentelemetry", + "incubating", + "observability", + "area-project-proposal" + ], + "category": "observability", + "cncfProjects": [ + "opentelemetry" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/open-telemetry/community/pull/2018", + "sourceRepo": "open-telemetry/community", + "reactions": 1, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:46:38.313Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/opentofu/opentofu-1716-rfc-tofu-version-compatibility.json b/solutions/cncf-generated/opentofu/opentofu-1716-rfc-tofu-version-compatibility.json new 
file mode 100644 index 00000000..e4504422 --- /dev/null +++ b/solutions/cncf-generated/opentofu/opentofu-1716-rfc-tofu-version-compatibility.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:50:11.261Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "opentofu: RFC: Tofu Version Compatibility", + "description": "In this RFC, we investigate current problems around software versioning and propose some potential solutions.\n\nThis is contingent on the [.tofu extension RFC](https://github.com/opentofu/opentofu/pull/1699) [merged]\n\nResolves #1708\n\n## Target Release\n\n1.10.0\n\n## Checklist\n\n- [x] I have read the [contribution guide](https://github.com/opentofu/opentofu/blob/main/CONTRIBUTING.md).\n- [x] I have not used an AI coding assistant to create this PR.\n- [x] I have written all code in this PR myself OR I have marked all code I have not written myself (including modified code, e.g. copied from other places and then modified) with a comment indicating where it came from.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "We had a discussion yesterday among the core team about how we should be handling versioning. 
I'll update the RFC today with the changes discussed.\n\n* Ignore terraform version requirements if they’re larger than 1.6.0\n* If any module’s version requirements were ignored this way, display a warning to the user\n* In debug logs, log each such occurrence\n* Add a method (env/flag) to disable the warning", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "required_version = \">= 1.6.2\"", + "required_version = \">= 1.9\"" + ] + } + }, + "metadata": { + "tags": [ + "opentofu", + "sandbox", + "app-definition", + "rfc" + ], + "category": "workloads", + "cncfProjects": [ + "opentofu" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/opentofu/opentofu/pull/1716", + "sourceRepo": "opentofu/opentofu", + "reactions": 3, + "comments": 20 + }, + "security": { + "scannedAt": "2026-02-27T17:50:11.262Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/opentofu/opentofu-1900-add-exclude-flag-support.json b/solutions/cncf-generated/opentofu/opentofu-1900-add-exclude-flag-support.json new file mode 100644 index 00000000..cd8e6843 --- /dev/null +++ b/solutions/cncf-generated/opentofu/opentofu-1900-add-exclude-flag-support.json @@ -0,0 +1,44 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:50:05.552Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "opentofu: Add exclude flag support", + "description": "Adds support for exclude flag.\n\nWorks very similarly to the `-target` flag, but does the opposite.\n- We basically find all excluded targets, and all targets that depend on them, and then pick all other targetable nodes in the graph. 
Then, act the same way as with targets - Target all picked nodes and their descendants, and target any output that all of its dependencies are targeted\n- There's validation that `-target` and `-exclude` flags cannot be used together. This possibly could be changed and adapted in the future to work well together\n- We error if an `-exclude` flag was provided for a remote run, so as not to confuse users, thinking that a run would exclude specific resources when actually it doesn't\n\nNote: Most of the code in this PR is inspired by the current implementation of the target flag. In most cases, you'd see the target flag's relevant implementation right before the newly added exclude flag implementation.\n\nResolves https://github.com/opentofu/opentofu/issues/426\n\n## ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Reminder for the PR assignee: If this is a user-visible change, please update the changelog as part of the PR.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "opentofu", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "opentofu" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/opentofu/opentofu/pull/1900", + "sourceRepo": "opentofu/opentofu", + "reactions": 11, + "comments": 1 + }, + "security": { + "scannedAt": "2026-02-27T17:50:05.552Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/opentofu/opentofu-2054-300-implement-statically-evaluated-provider-references.json b/solutions/cncf-generated/opentofu/opentofu-2054-300-implement-statically-evaluated-provider-references.json new file mode 100644 index 00000000..2520e931 --- /dev/null +++ b/solutions/cncf-generated/opentofu/opentofu-2054-300-implement-statically-evaluated-provider-references.json @@ 
-0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:50:08.730Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "opentofu: #300: Implement statically evaluated provider references", + "description": "Resolves #300.\n\nThis PR encapsulates all the #300 implementation parts made by @cam72cam, @Evi1Pumpkin, and @ollevche.\n\nRelated info:\n* https://github.com/opentofu/opentofu/issues/300\n* [RFC on statically evaluated provider references](https://github.com/opentofu/opentofu/blob/main/rfc/20240513-static-evaluation-providers.md)\n* [Provider references docs](https://github.com/opentofu/opentofu/blob/main/docs/provider-references.md)\n* [Detailed breakdown of graph-related changes](https://github.com/opentofu/opentofu/pull/1963#issue-2502217685)\n\nTODOs:\n\n- [x] We have a little bug with this approach that if an existing resource in the state had a ResourceProvider and its configuration changed and now it uses InstanceProvider (or vice versa), we need to explicitly delete the old provider configuration. This is a known bug that I still need to handle and is quite easy to fix. [(c) Ronny](https://github.com/opentofu/opentofu/pull/1963#issue-2502217685)\n- [x] Flows that were altered and need som", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Part of #300\nMain PR: #2054\n\nIn debugging preliminary issues reported in #2054, @ollevche and I stumbled upon some interesting edge cases in the existing provider resolution code. We did some refactoring in e73c28e1b527fe9d45bc1b8e8fe4df263362cebd that made it easier to spot issues like the ones reported.\n\nFrom there, I started experimenting with some edge cases to try to try to hit other failure scenarios and found a few. 
In the end, I refactoring and expanding on the core pieces that @Evi1Pumpkin wrote previously and took a really hard look at the *quirky* behaviors in the ProviderTransformer, even before these changes.\n\n## Changes From @Evi1Pumpkin's Implementation:\n* ProviderConfig on the Resource State object (internal representation, not V4 serializer) is *moved* to the Instance instead of existing in two places.\n * Conceptually this is much easier to grok.\n * It removes a lot of panic(*) calls in the code\n * Harder to hold it wrong\n * Translated between resource/instance at the V4 encode/decode functions.\n * This also implies the movement of storedProviderConfig and ResolvedResourceProvider to `NodeAbstractResourceInstance` from `NodeAbstractResource\n* Attaching resource instance states to `NodeResourceAbstract` for orphaned nodes\n * When dealing with resources that are partially removed from the config (orphaned instances, with others remaining), we need to be able to attach the providers required by those orphaned instances.\n * The solution I found (with lar", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "!!!!!!!!!!!!!!!!!!!!!!!!!!! OPENTOFU CRASH !!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\n\r\nOpenTofu crashed! This is always indicative of a bug within OpenTofu.\r\nPlease report the crash with OpenTofu[1] so that we can fix this.\r\n\r\nWhen reporting bugs, please include your OpenTofu version, the stack trace\r\nshown below, and any additional information which may help replicate the issue.\r\n\r\n[1]: https://github.com/opentofu/opentofu/issues\r\n\r\n!!!!!!!!!!!!!!!!!!!!!!!!!!! 
OPENTOFU CRASH !!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\n\r\nResolvedProvider for module.secondary_region[\"Milan\"].module.vpc.aws_subnet.public[0] cannot get a provider\r\ngoroutine 4735 [running]:\r\nruntime/debug.Stack()\r\n\t/usr/lib/go/src/runtime/debug/stack.go:26 +0x5e\r\nruntime/debug.PrintStack()\r\n\t/usr/lib/go/src/runtime/debug/stack.go:18 +0x13\r\ngithub.com/opentofu/opentofu/internal/logging.panicHandler({0x2d25780, 0xc005db1380}, {0xc0066eb400, 0x39b, 0x400})\r\n\t/home/francois/BIG/Disposable/opentofu/internal/logging/panic.go:96 +0xd6\r\ngithub.com/opentofu/opentofu/internal/tofu.(*Graph).walk.PanicHandlerWithTraceFn.func2()\r\n\t/home/francois/BIG/Disposable/opentofu/internal/logging/panic.go:82 +0x8a\r\npanic({0x2d25780?, 0xc005db1380?})\r\n\t/usr/lib/go/src/runtime/panic.go:785 +0x132\r\ngithub.com/opentofu/opentofu/internal/tofu.(*NodeAbstractResourceInstance).ResolvedProvider(0xc002876508?)\r\n\t/home/francois/BIG/Disposable/opentofu/internal/tofu/node_resource_abstract_instance.go:99 +0x454\r\ngithub.com/opentofu/opentofu/internal/tofu.(*NodePlannableResourceInstance).managedResourceExecute(0xc0065c5340, {0x39e4d08, 0xc00181fb00})\r\n\t/home/francois/BIG/Disposable/opentofu/internal/tofu/node_resource_plan_instance.go:167 +0xce\r\ngithub.com/opentofu/opentofu/internal/tofu.(*NodePlannableResourceInstance).Execute(0x39b5580?, {0x39e4d08?, 0xc00181fb00?}, 0xb0?)\r\n\t/home/francois/BIG/Disposable/opentofu/internal/tofu/node_resource_plan_instance.go:92 +0x8a\r\ngithub.com/opentofu/opentofu/internal/tofu.(*ContextGraphWalker).Execute(0xc0044eb200, {0x39e4d08, 0xc00181fb00}, {0x7d7b245c1528, 0xc0065c5340})\r\n\t/home/francois/BIG/Disposable/opentofu/internal/tofu/graph_walk_context.go:147 +0xb5\r\ngithub.com/opentofu/opentofu/internal/tofu.(*Graph).walk.func1({0x3316340, 0xc0065c5340})\r\n\t/home/francois/BIG/Disposable/opentofu/internal/tofu/graph.go:86 +0x3f8\r\ngithub.com/opentofu/opentofu/internal/dag.(*Walker).walkVertex(0xc000b4b080, {0x3316340, 
0xc0065c5340}, 0xc000b138c0)\r\n\t/home/francois/BIG/Disposable/opentofu/internal/dag/walk.go:385 +0x2d1\r\ncreated by github.com/opentofu/opentofu/internal/dag.(*Walker).Update in goroutine 2947\r\n\t/home/francois/BIG/Disposable/opentofu/internal/dag/walk.go:308 +0xfb3\r\nWith go-routine called from:\r\ngoroutine 2947 [running]:\r\nruntime/debug.Stack()\r\n\t/usr/lib/go/src/runtime/debug/stack.go:26 +0x5e\r\ngithub.com/opentofu/opentofu/internal/logging.PanicHandlerWithTraceFn(...)\r\n\t/home/francois/BIG/Disposable/opentofu/internal/logging/panic.go:72\r\ngithub.com/opentofu/opentofu/internal/tofu.(*Graph).walk(0xc00693ce00, {0x39c4fc8, 0xc0044eb200})\r\n\t/home/francois/BIG/Disposable/opentofu/internal/tofu/graph.go:51 +0x3a\r\ngithub.com/opentofu/opentofu/internal/tofu.(*Graph).walk.func1({0x32ac540, 0xc003f21770})\r\n\t/home/francois/BIG/Disposable/opentofu/internal/tofu/graph.go:127 +0x833\r\ngithub.com/opentofu/opentofu/internal/dag.(*Walker).walkVertex(0xc0028cf5c0, {0x32ac540, 0xc003f21770}, 0xc00127c840)\r\n\t/home/francois/BIG/Disposable/opentofu/internal/dag/walk.go:385 +0x2d1\r\ncreated by github.com/opentofu/opentofu/internal/dag.(*Walker).Update in goroutine 81\r\n\t/home/francois/BIG/Disposable/opentofu/internal/dag/walk.go:308 +0xfb3", + "secondary_region/modules/vpc/main.tf", + "The `secondary_region` has a lot of submodules, it uses the `aws.aws_main` provider in order to establish VPC peerings with the main region. It works well if I just duplicate the `secondary_region` module and give it different names manually, which is what I have been doing until now and hoping this PR would improve.\r\n\r\n\nThank you @kubrickfr for giving it a chance! I'll take a look at it tomorrow. I put the PR as draft because after all the merges I have to take another look at what needs to be glued properly and double-check existing todos in the code (it might be the reason of the crash).\n@ollevche Thanks! 
I did end-up removing all the weirdness with extra providers declared in submodules but to no avail, I always end up with the same crash in the submodules.\n@ollevche d3cbeb832713d1e03f9694a4e00d0db059cfd179 fixes the crash and deploys my configuration even with the extra providers for vpc peering between regions in sub sub modules.\r\nA big thank you!\nOk, after trying to apply a change, I'm getting:" + ] + } + }, + "metadata": { + "tags": [ + "opentofu", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "opentofu" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/opentofu/opentofu/pull/2054", + "sourceRepo": "opentofu/opentofu", + "reactions": 2, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:50:08.730Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/opentofu/opentofu-2628-enhance-removed-block-with-lifecycle-and-provisioner-functionaliti.json b/solutions/cncf-generated/opentofu/opentofu-2628-enhance-removed-block-with-lifecycle-and-provisioner-functionaliti.json new file mode 100644 index 00000000..0caa9f16 --- /dev/null +++ b/solutions/cncf-generated/opentofu/opentofu-2628-enhance-removed-block-with-lifecycle-and-provisioner-functionaliti.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:50:09.939Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "opentofu: Enhance `removed` block with `lifecycle` and `provisioner` functionalities", + "description": "Resolves #2556\n\n_Recommended to be reviewed per commit since there are a lot of changes. 
Reworked the commit history and split the changes per commit and added information in each of those for easing the review a little_\n\nThis PR adds two new functionalities on the `removed` block:\n* Now supports a `lifecycle` block that can have one attribute: `destroy` that can have two values `true`/`false`.\n* Now supports `provisioner` blocks for the author of the `removed` block to be able to move the `provisioner` blocks from the actual removed `resource`. This is needed because `removed` and the targeted `resource` block cannot be in the same time in the configuration. Therefore, the user needs to have an option to still execute the provisioners before destroying the `resource`.\n* Some additional warnings/errors that were missed in the initial implementation\n\n## `lifecycle` block\nThis block is introduced in order to control the way a `resource` is removed.\n* If `lifecycle.destroy=true`, the `res", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Our default behavior for the `removed` block is good and corresponds to its intended purpose, in my opinion. \nWe should probably keep it and document the differences with the Terraform.\nIf we go that route, we might as well not make this a breaking change and leave the lifecycle block as optional.\nAbout the provisioner block - I've experimented with it and couldn't come up with a simple implementation, excluding it in this PR and getting back to it if we get an issue later on with good traction is the best course of action. 
\n\nEither way, we need to bring this up on triage to make a decision.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "lifecycle {\r\n destroy = false\r\n }\r\n\r\n provisioner \"local-exec\" {\r\n when = destroy\r\n command = \"echo 'Instance ${self.id} has been destroyed.'\"\r\n }", + "> lifecycle {\r\n> destroy = false\r\n> }\r\n> \r\n> provisioner \"local-exec\" {\r\n> when = destroy\r\n> command = \"echo 'Instance ${self.id} has been destroyed.'\"\r\n> }\r\n>" + ] + } + }, + "metadata": { + "tags": [ + "opentofu", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "opentofu" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/opentofu/opentofu/pull/2628", + "sourceRepo": "opentofu/opentofu", + "reactions": 3, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:50:09.940Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/opentofu/opentofu-579-statemgr-make-filesystem-impl-correct-and-fast.json b/solutions/cncf-generated/opentofu/opentofu-579-statemgr-make-filesystem-impl-correct-and-fast.json new file mode 100644 index 00000000..917dfa0a --- /dev/null +++ b/solutions/cncf-generated/opentofu/opentofu-579-statemgr-make-filesystem-impl-correct-and-fast.json @@ -0,0 +1,43 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:50:04.431Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "opentofu: statemgr: Make Filesystem impl \"correct\" and fast", + "description": "The existing Filesystem statemgr implementation claims to use in-memory transient storage and the local filesystem for persistent storage.\n\n> // Filesystem is a full state manager that uses a file in the local filesystem\n> // for persistent storage.\n> //\n> // The 
transient storage for Filesystem is always in-memory.\n\nHowever, the actual WriteState impl admits to being \"incorrect\":\n\n> // WriteState is an incorrect implementation of Writer that actually also\n> // persists.\n\nIt appears to have been this way since 2018. Perhaps this incorrectness is load-bearing, but given that this is only one of many implementations of the state manager interface, I suspect it would be fine to fix the implementation to be \"correct\".\n\nThis change splits off the expensive parts of WriteState into the PersistState method, which was previously a no-op.\n\nWithout this change, the time we spend maintaining is roughly quadratic with the number of resources being managed because each resouce causes:\n\n1. The state", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "My colleague @jonjohnsonjr has been doing quite a lot of profiling of `terraform`, and has found some possible areas where time is being wasted unnecessarily.\n\nThe following flamegraph shows where time is spent running `terraform apply` on an admittedly pretty pathologically large TF graph (aren't they all 🤷). It shows TF state being marshalled to JSON 4 times, taking up about 1/2 of the total time. This is done so that the previous/new state can be diffed, to determine if any changes actually need to be made.\n\nRoughly half of each of those JSON marshallings is `appendIndent`, which is only called with `json.MarshalIndent`.\n\n\"Screenshot\n\nThe theory is that dropping indentation would cut this 22s apply down to about 17s.\n\nI am not a deep enough TF expert to know what other implications this change might have. 
Based on a brief look around, I don't believe it would have any impact on state diffing, since both sides of the diff go through this same code path to accomplish the diff (and so would both be unmarshalled or not).\n\nThere might be other cases where state is expected/required to be marshalled for some reason; AFAIK, no human is expected to read that directly, but maybe something somewhere cares about there being newlines and spaces. If nothing else I'd love to learn through this process what that might be, if this change isn", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "opentofu", + "sandbox", + "app-definition", + "accepted" + ], + "category": "workloads", + "cncfProjects": [ + "opentofu" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/opentofu/opentofu/pull/579", + "sourceRepo": "opentofu/opentofu", + "reactions": 11, + "comments": 6 + }, + "security": { + "scannedAt": "2026-02-27T17:50:04.431Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/opentofu/opentofu-887-website-split-installation-instructions.json b/solutions/cncf-generated/opentofu/opentofu-887-website-split-installation-instructions.json new file mode 100644 index 00000000..e1951f01 --- /dev/null +++ b/solutions/cncf-generated/opentofu/opentofu-887-website-split-installation-instructions.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:50:07.180Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "opentofu: website: Split installation instructions", + "description": "This change splits the installation instructions into separate pages and provides manual installation instructions for the OS packages.\n\nResolves #878\nResolves #879\n\n## Preview\n\n![Screenshot of the OpenTofu 
website, detailing the various installation methods on the left in menu as well as in the main page.](https://github.com/opentofu/opentofu/assets/86970079/c7381f24-0d93-40c4-8864-486855ad60f1)\n\n## TODO\n\n- [X] Alpine Linux instructions\n- [x] Alpine Linux instructions: Where's the APK signing key?\n- [X] DEB instructions\n- [X] DEB instructions: Test on Debian\n- [X] DEB instructions: Test on Ubuntu\n- [X] DEB instructions: Test on Raspbian\n- [x] RPM instructions\n- [x] RPM instructions: Test on ~RHEL~CentOS Stream\n- [x] RPM instructions: Test on Fedora\n- [x] RPM instructions: Test on openSUSE\n- [X] Snap instructions\n- [x] Snap instructions: Test on Ubuntu\n- [X] Portable instructions\n- [X] Portable instructions: Test on Linux\n- [X] Portable instructions: Test on MacOS\n- [X] Portable instru", + "type": "deploy", + "status": "completed", + "resolution": { + "summary": "This PR adds the [`raw-loader`](https://v4.webpack.js.org/loaders/raw-loader/) package to support including raw files (such as scripts) in the documentation.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "apk add opentofu --repository=http://dl-cdn.alpinelinux.org/alpine/edge/testing/" + ] + } + }, + "metadata": { + "tags": [ + "opentofu", + "sandbox", + "app-definition" + ], + "category": "workloads", + "cncfProjects": [ + "opentofu" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/opentofu/opentofu/pull/887", + "sourceRepo": "opentofu/opentofu", + "reactions": 5, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:50:07.180Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/openyurt/openyurt-1124-feature-yurtadm-reset-join-modification-do-not-remove-k8s-binaries.json 
b/solutions/cncf-generated/openyurt/openyurt-1124-feature-yurtadm-reset-join-modification-do-not-remove-k8s-binaries.json new file mode 100644 index 00000000..22d8f14b --- /dev/null +++ b/solutions/cncf-generated/openyurt/openyurt-1124-feature-yurtadm-reset-join-modification-do-not-remove-k8s-binaries.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:41.388Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "openyurt: Feature: yurtadm reset/join modification. Do not remove k8s binaries, add flag for using local cni binaries.", + "description": "/kind feature\n\nyurtadm reset/join modification. Do not remove k8s binaries, add flag for using local cni binaries.\n\nFixes #1095\n\n- Do not remove `kubeadm/kubelet/kubelet` and cni binaries on `yurtadm reset`.\n- Add new flag `--reuse-cni-bin` for `yurtadm join`. It determines whether to reuse local CNI binaries or not. Default value is `false`.", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Welcome @Windrow14! 
It looks like this is your first PR to openyurtio/openyurt 🎉", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "openyurt", + "incubating", + "app-definition", + "approved", + "lgtm", + "size-m", + "kind-feature" + ], + "category": "workloads", + "cncfProjects": [ + "openyurt" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/openyurtio/openyurt/pull/1124", + "sourceRepo": "openyurtio/openyurt", + "reactions": 2, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:46:41.389Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/openyurt/openyurt-1600-proposal-for-multi-region-workloads-configuration-rendering-engine.json b/solutions/cncf-generated/openyurt/openyurt-1600-proposal-for-multi-region-workloads-configuration-rendering-engine.json new file mode 100644 index 00000000..80f46d06 --- /dev/null +++ b/solutions/cncf-generated/openyurt/openyurt-1600-proposal-for-multi-region-workloads-configuration-rendering-engine.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:40.482Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "openyurt: Proposal for Multi-region workloads configuration rendering engine", + "description": "#### What type of PR is this?\n> Uncomment only one `/kind <>` line, hit enter to put that in a new line, and remove leading whitespace from that line:\n> /kind bug\n> /kind documentation\n> /kind enhancement\n> /kind good-first-issue\n> /kind feature\n> /kind question\n> /kind design\n> /sig ai\n> /sig iot\n> /sig network\n> /sig storage\n\nkind feature\n#### What this PR does / why we need it:\n\n#### Which issue(s) this PR fixes:\n\nFixes #1435 \n\n#### Special notes for your reviewer:\n\n#### Does this PR introduce a 
user-facing change?\n\n```release-note\n\n```\n\n#### other Note", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@vie-serendipity: GitHub didn't allow me to assign the following users: your_reviewer.\n\nNote that only [openyurtio members](https://github.com/orgs/openyurtio/people), repo collaborators and people who have commented on this issue/PR can be assigned. Additionally, issues/PRs can only have 10 assignees at the same time.\nFor more information please see [the contributor guide](https://git.k8s.io/community/contributors/guide/#issue-assignment-in-github)\n\n
\n\nIn response to [this](https://github.com/openyurtio/openyurt/pull/1600):\n\n>\n>\n>#### What type of PR is this?\n>> Uncomment only one `/kind <>` line, hit enter to put that in a new line, and remove leading whitespace from that line:\n>> /kind bug\n>> /kind documentation\n>> /kind enhancement\n>> /kind good-first-issue\n>> /kind feature\n>> /kind question\n>> /kind design\n>> /sig ai\n>> /sig iot\n>> /sig network\n>> /sig storage\n>\n>kind feature\n>#### What this PR does / why we need it:\n>\n>#### Which issue(s) this PR fixes:\n>\n>Fixes #1435 \n>\n>#### Special notes for your reviewer:\n>\n>\n>#### Does this PR introduce a user-facing change?\n>\n>```release-note\n>\n>```\n>\n>#### other Note\n>\n>\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n\r\n\n\n@vie-serendipity: GitHub didn't allow me to assign the following users: your_reviewer.\n\nNote that only [openyurtio members](https://github.com/orgs/openyurtio/people), repo collaborators and people who have commented on this issue/PR can be assigned. Additionally, issues/PRs can only have 10 assignees at the same time.\nFor more information please see [the contributor guide](https://git.k8s.io/community/contributors/guide/#issue-assignment-in-github)\n\n
\n\nIn response to [this](https://github.com/openyurtio/openyurt/pull/1600):\n\n>\r\n>\r\n>#### What type of PR is this?\r\n>> Uncomment only one `/kind <>` line, hit enter to put that in a new line, and remove leading whitespace from that line:\r\n>> /kind bug\r\n>> /kind documentation\r\n>> /kind enhancement\r\n>> /kind good-first-issue\r\n>> /kind feature\r\n>> /kind question\r\n>> /kind design\r\n>> /sig ai\r\n>> /sig iot\r\n>> /sig network\r\n>> /sig storage\r\n>\r\n>kind feature\r\n>#### What this PR does / why we need it:\r\n>\r\n>#### Which issue(s) this PR fixes:\r\n>\r\n>Fixes #1435 \r\n>\r\n>#### Special notes for your reviewer:\r\n>\r\n>\r\n>#### Does this PR introduce a user-facing change?\r\n>\r\n>", + ">\r\n>#### other Note\r\n>\r\n>\n\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n
\n## [Codecov](https://app.codecov.io/gh/openyurtio/openyurt/pull/1600?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=openyurtio) Report\n> Merging [#1600](https://app.codecov.io/gh/openyurtio/openyurt/pull/1600?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=openyurtio) (edbbea0) into [master](https://app.codecov.io/gh/openyurtio/openyurt/commit/d15078f7bc19857e87952ba418fb1dd71d191bfa?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=openyurtio) (d15078f) will **not change** coverage.\n> The diff coverage is `n/a`." + ] + } + }, + "metadata": { + "tags": [ + "openyurt", + "incubating", + "app-definition", + "approved", + "lgtm", + "size-l" + ], + "category": "workloads", + "cncfProjects": [ + "openyurt" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/openyurtio/openyurt/pull/1600", + "sourceRepo": "openyurtio/openyurt", + "reactions": 4, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:46:40.482Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/openyurt/openyurt-617-refactor-yurtctl-convert-use-node-servant-in-dispatch-job.json b/solutions/cncf-generated/openyurt/openyurt-617-refactor-yurtctl-convert-use-node-servant-in-dispatch-job.json new file mode 100644 index 00000000..37a4666b --- /dev/null +++ b/solutions/cncf-generated/openyurt/openyurt-617-refactor-yurtctl-convert-use-node-servant-in-dispatch-job.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:42.384Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "openyurt: refactor: yurtctl convert use node-servant in dispatch job", + "description": "#### What type of PR is this?\n> Uncomment only one ` /kind 
<>` line, hit enter to put that in a new line, and remove leading whitespace from that line:\n> /kind bug\n> /kind documentation\n> /kind enhancement\n> /kind good-first-issue\n> /kind feature\n> /kind question\n> /kind design\n> /sig ai\n> /sig iot\n> /sig network\n> /sig storage\n> /sig storage\n\n/kind feature\n\n#### What this PR does / why we need it:\nThis PR makes the following changes:\n1. Remove convert cloudnode/edgenode sub command.\n2. Replace the \"yurt-servant\" job in yurtctl with \"node-servant\".\n3. Automatically set \"--cert-ip\" to yurt-tunnel-server if the user specify the tunnel server address in \"yurtctl convert\".\n\nThe documentation needs to be updated too, I will create another PR to revise the doc later.\n#### Which issue(s) this PR fixes:\n\nFixes #546\n\n#### Special notes for your reviewer:\n\n/assign @rambohe-ch @adamzhoul \n\n#### Does this PR introduce a user-facing change?\n\n```release-note\nThe subcommand \"cloudnode\" and \"edgenod", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@DrmagicE: GitHub didn't allow me to assign the following users: your_reviewer.\n\nNote that only [openyurtio members](https://github.com/orgs/openyurtio/people), repo collaborators and people who have commented on this issue/PR can be assigned. Additionally, issues/PRs can only have 10 assignees at the same time.\nFor more information please see [the contributor guide](https://git.k8s.io/community/contributors/guide/#issue-assignment-in-github)\n\n
\n\nIn response to [this](https://github.com/openyurtio/openyurt/pull/617):\n\n>\n>\n>\n>#### What type of PR is this?\n>> Uncomment only one ` /kind <>` line, hit enter to put that in a new line, and remove leading whitespace from that line:\n>> /kind bug\n>> /kind documentation\n>> /kind enhancement\n>> /kind good-first-issue\n>> /kind feature\n>> /kind question\n>> /kind design\n>> /sig ai\n>> /sig iot\n>> /sig network\n>> /sig storage\n>> /sig storage\n>\n>/kind feature\n>\n>#### What this PR does / why we need it:\n>This PR makes the following changes:\n>1. Remove convert cloudnode/edgenode sub command.\n>2. Replace the \"yurt-servant\" job in yurtctl with \"node-servant\".\n>3. Automatically set \"--cert-ip\" to yurt-tunnel-server if the user specify the tunnel server address in \"yurtctl convert\".\n>\n>The documentation needs to be updated too, I will create another PR to revise the doc later.\n>#### Which issue(s) this PR fixes:\n>\n>Fixes #546\n>\n>#### Special notes for your reviewer:\n>\n>/assign @rambohe-ch @adamzhoul \n>\n>#### Does this PR introduce a user-f", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "#### other Note\r\n\r\n\n\n@DrmagicE: GitHub didn't allow me to assign the following users: your_reviewer.\n\nNote that only [openyurtio members](https://github.com/orgs/openyurtio/people), repo collaborators and people who have commented on this issue/PR can be assigned. Additionally, issues/PRs can only have 10 assignees at the same time.\nFor more information please see [the contributor guide](https://git.k8s.io/community/contributors/guide/#issue-assignment-in-github)\n\n
\n\nIn response to [this](https://github.com/openyurtio/openyurt/pull/617):\n\n>\r\n>\r\n>\r\n>#### What type of PR is this?\r\n>> Uncomment only one ` /kind <>` line, hit enter to put that in a new line, and remove leading whitespace from that line:\r\n>> /kind bug\r\n>> /kind documentation\r\n>> /kind enhancement\r\n>> /kind good-first-issue\r\n>> /kind feature\r\n>> /kind question\r\n>> /kind design\r\n>> /sig ai\r\n>> /sig iot\r\n>> /sig network\r\n>> /sig storage\r\n>> /sig storage\r\n>\r\n>/kind feature\r\n>\r\n>#### What this PR does / why we need it:\r\n>This PR makes the following changes:\r\n>1. Remove convert cloudnode/edgenode sub command.\r\n>2. Replace the \"yurt-servant\" job in yurtctl with \"node-servant\".\r\n>3. Automatically set \"--cert-ip\" to yurt-tunnel-server if the user specify the tunnel server address in \"yurtctl convert\".\r\n>\r\n>The documentation needs to be updated too, I will create another PR to revise the doc later.\r\n>#### Which issue(s) this PR fixes:\r\n>\r\n>Fixes #546\r\n>\r\n>#### Special notes for your reviewer:\r\n>\r\n>/assign @rambohe-ch @adamzhoul \r\n>\r\n>#### Does this PR introduce a user-facing change?\r\n>\r\n>" + ] + } + }, + "metadata": { + "tags": [ + "openyurt", + "incubating", + "app-definition", + "approved", + "lgtm", + "size-xl", + "kind-feature" + ], + "category": "workloads", + "cncfProjects": [ + "openyurt" + ], + "targetResourceKinds": [ + "Job", + "Node" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/openyurtio/openyurt/pull/617", + "sourceRepo": "openyurtio/openyurt", + "reactions": 1, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:46:42.384Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/operator-framework/operator-framework-1162-internal-scaffold-olm-catalog-populate-csv-customresourc.json 
b/solutions/cncf-generated/operator-framework/operator-framework-1162-internal-scaffold-olm-catalog-populate-csv-customresourc.json new file mode 100644 index 00000000..17cd69ed --- /dev/null +++ b/solutions/cncf-generated/operator-framework/operator-framework-1162-internal-scaffold-olm-catalog-populate-csv-customresourc.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:45.540Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "operator-framework: internal/scaffold/olm-catalog: populate CSV customresourcedefinitions from Go type annotations", + "description": "**Description of the change:** use doc and code comments on CR types to populate `customresourcedefinitions.owned` fields except for `actionDescriptors`, which will be persisted to the new CSV.\n\n**Motivation for the change:** see #1132\n\nCloses #1132", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "**Description of the change:** This PR defines new structs for: the variables that scorecard tests need access to, scores, tests, and test suites. It defines each test and test suite using these new structs and now weights the tests based on how important they should be in the final score. 
It also splits `checkSpecAndStat` (as that was acutally 2 separate tests plus a readiness checker) and moves the readiness checking functionality into its own function.\n\n**Motivation for the change:** Closes #960", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "customresourcedefinitions:\r\n owned:\r\n - description: Backup is the Schema for the backups API\r\n kind: Backup\r\n name: backups.postgresql-operator.dev4devs.com\r\n version: v1alpha1\r\n - description: Postgresql is the Schema for the postgresqls API\r\n kind: Postgresql\r\n name: postgresqls.postgresql-operator.dev4devs.com\r\n version: v1alpha1" + ] + } + }, + "metadata": { + "tags": [ + "operator-framework", + "incubating", + "app-definition", + "kind-feature", + "size-xxl", + "olm-integration", + "approved" + ], + "category": "workloads", + "cncfProjects": [ + "operator-framework" + ], + "targetResourceKinds": [ + "Customresourcedefinition" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/operator-framework/operator-sdk/pull/1162", + "sourceRepo": "operator-framework/operator-sdk", + "reactions": 3, + "comments": 31 + }, + "security": { + "scannedAt": "2026-02-27T17:46:45.540Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/operator-framework/operator-framework-1828-make-generated-inventory-define-implicit-localhost.json b/solutions/cncf-generated/operator-framework/operator-framework-1828-make-generated-inventory-define-implicit-localhost.json new file mode 100644 index 00000000..178c0025 --- /dev/null +++ b/solutions/cncf-generated/operator-framework/operator-framework-1828-make-generated-inventory-define-implicit-localhost.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:51.235Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + 
"title": "operator-framework: Make generated inventory define implicit localhost", + "description": "**Description of the change:**\nMakes localhost default to the same python interpreter as the one that is running ansible\n\n**Motivation for the change:**\nOur generated inventory wasn't setting the default ansible python interpreter for localhost to match Ansible's implicit localhost, which made running up local difficult.\n\ncloses #1437 \n\nThanks to @nmasse-itix", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/lgtm\n/approve", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "operator-framework", + "incubating", + "app-definition", + "size-xs", + "lgtm", + "approved" + ], + "category": "workloads", + "cncfProjects": [ + "operator-framework" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/operator-framework/operator-sdk/pull/1828", + "sourceRepo": "operator-framework/operator-sdk", + "reactions": 1, + "comments": 52 + }, + "security": { + "scannedAt": "2026-02-27T17:46:51.235Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/operator-framework/operator-framework-1991-allow-using-subset-of-endpoints-when-creating-a-servicem.json b/solutions/cncf-generated/operator-framework/operator-framework-1991-allow-using-subset-of-endpoints-when-creating-a-servicem.json new file mode 100644 index 00000000..4032c650 --- /dev/null +++ b/solutions/cncf-generated/operator-framework/operator-framework-1991-allow-using-subset-of-endpoints-when-creating-a-servicem.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:48.757Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "operator-framework: Allow using subset of endpoints when 
creating a ServiceMonitor", + "description": "metrics: add an ability to create a ServiceMonitor with only a subset of ports, provided as a list of port names.\n\nThis change adds 2 additional calls for creating a ServiceMonitor: `CreateServiceMonitorWithPorts` and `GenerateServiceMonitorWithPorts`, they allow providing a list of ports to add as endpoints in the created ServiceMonitor object. The list is compared with the ports in the provided service and only adds the endpoints that already exist in the initial service.\n\nCloses #1972 \n\nSigned-off-by: Alex Lourie ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Without this, while the ServiceMonitor gets created fine, Prometheus\nlooks in the wrong place, and doesn't pick up on any metrics.\n\nI've also tried to add the ability to set additional labels on the ServiceMonitor here.\n\nWhen provisioning a Prometheus server with a Prometheus CR, one often\nspecifies a ServiceMonitorSelector. In this case, this Prometheus will only\nscrape metrics from endpoints that are specified in ServiceMonitors\nthat are selected based on the labels specified in the\nServiceMonitorSelector.\n\nWithout the ability to set a label on the ServiceMonitor, then many\npeople would have to either not use the utility function from here, or\nupdate the resource after creation.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "...\r\ntime=\"2019-09-30T14:32:45Z\" level=info msg=\"Waiting for deployment/olm-operator rollout to complete\"\r\ntime=\"2019-09-30T14:32:45Z\" level=info msg=\" Waiting for deployment \\\"olm-operator\\\" to rollout: 0 of 1 updated replicas are available\"\r\ntime=\"2019-09-30T14:32:52Z\" level=info msg=\" Deployment \\\"olm-operator\\\" successfully rolled out\"\r\ntime=\"2019-09-30T14:32:52Z\" level=info msg=\"Waiting for deployment/catalog-operator rollout to 
complete\"\r\ntime=\"2019-09-30T14:32:52Z\" level=info msg=\" Deployment \\\"catalog-operator\\\" successfully rolled out\"\r\ntime=\"2019-09-30T14:32:52Z\" level=info msg=\"Waiting for deployment/packageserver rollout to complete\"\r\ntime=\"2019-09-30T14:32:52Z\" level=fatal msg=\"Failed to install OLM version \\\"latest\\\": deployment/packageserver failed to rollout: deployments.apps \\\"packageserver\\\" not found\"'\r\nMakefile:141: recipe for target 'test/subcommand/alpha-olm' failed" + ] + } + }, + "metadata": { + "tags": [ + "operator-framework", + "incubating", + "app-definition", + "size-s", + "lgtm", + "ok-to-test" + ], + "category": "workloads", + "cncfProjects": [ + "operator-framework" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/operator-framework/operator-sdk/pull/1991", + "sourceRepo": "operator-framework/operator-sdk", + "reactions": 1, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:46:48.757Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/operator-framework/operator-framework-2249-cmd-gen-csv-use-include-instead-of-csv-config.json b/solutions/cncf-generated/operator-framework/operator-framework-2249-cmd-gen-csv-use-include-instead-of-csv-config.json new file mode 100644 index 00000000..f3d433c4 --- /dev/null +++ b/solutions/cncf-generated/operator-framework/operator-framework-2249-cmd-gen-csv-use-include-instead-of-csv-config.json @@ -0,0 +1,51 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:47.088Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "operator-framework: cmd/.../gen-csv: use `--include` instead of CSV config", + "description": "**Description of the change:**\n* internal/scaffold/olm-catalog: replace CSVConfig with genutil.Config and update related code\n* cmd/operator-sdk/olmcatalog: 
use generator Config in scaffolds, remove config flag, add `--include` flag\n* doc: remove CSVConfig documentation, update CLI docs\n* internal/generate/util: add Config type and helper\n\n**Motivation for the change:** `CSVConfig` configures `olm-catalog gen-csv` to include data from certain files/dirs in the generation process. This config object may change significantly if we add/remove requirements for CSV generation in the future or users request more config fields. Instead of breaking config structure, the generator should look through all files in a default location to gather what data it needs to compile a CSV, including dirs/files passed in by `--include=[list of paths]`. `--include` is a generalization of specifying exact paths required, so we can get the same effect as a config with much less actual configuration. The defaul", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "**Description of the change:** \n* cmd/operator-sdk: replace CRD scaffold with CRD generator\n* internal/scaffold: remove CRD scaffold and tests\n* internal/generate/crd: CRD generator\n* test/test-framework: update operator with new CRD generator\n* internal/util/fileutil: DotPath prepends \"./\" or \".\\\" to a path depending on OS\n* doc: discuss storage version setting in CRDs\n* test/test-framework: use storage version annotation in APIs\n\n**Motivation for the change:** This PR aligns the CRD generator with controller-gen's. By using the same option strings one would pass to [`controller-gen`](https://github.com/kubernetes-sigs/controller-tools/blob/6eef398/cmd/controller-gen/main.go) to configure the CRD generator, we can easily transition this command to a Makefile, as well as more easily debug CRD generation errors.\n\nNote that a resource is no longer required to run the generator for Go projects. It will update each CRD that corresponds to an API. 
For non-go projects a resource is still required.\n\nCloses #2042", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "gen-csv --inputs config,deploy/prod/some_crd.yaml", + "$ operator-sdk olm-catalog gen-csv \\\r\n --include \"config/operator.yaml,config/role.yaml,config/300-eventing-v1alpha1-knativeeventing-crd.yaml,config/role_binding.yaml,config/service_account.yaml\" \\\r\n --from-version \"0.10.1\" \\\r\n --csv-version \"0.10.2\" \\\r\n --operator-name knative-eventing-operator \\\r\n --update-crds \\\r\n --csv-channel alpha \\\r\n --default-channel\r\nINFO[0000] Generating CSV manifest version 0.10.2 \r\nINFO[0000] API directory pkg/apis/operator/v1alpha1 does not exist. Skipping CSV annotation parsing for API operator.knative.dev/v1alpha1, Kind=Eventing. \r\nINFO[0000] Skipping non-object manifest deploy/olm-catalog/knative-eventing-operator/knative-eventing-operator.package.yaml \r\nINFO[0000] Created deploy/olm-catalog/knative-eventing-operator/0.10.2/knative-eventing-operator.v0.10.2.clusterserviceversion.yaml \r\nINFO[0000] Created deploy/olm-catalog/knative-eventing-operator/knative-eventing-operator.package.yaml \r\nError: lstat deploy/crds: no such file or directory\r\nUsage:\r\n operator-sdk olm-catalog gen-csv [flags]\r\n\r\nFlags:\r\n --csv-channel string Channel the CSV should be registered under in the package manifest\r\n --csv-version string Semantic version of the CSV\r\n --default-channel Use the channel passed to --csv-channel as the package manifests' default channel. Only valid when --csv-channel is set\r\n --from-version string Semantic version of an existing CSV to use as a base\r\n -h, --help help for gen-csv\r\n --include strings Paths to include in CSV generation, ex. \"deploy/prod,deploy/test\". 
If this flag is set and you want to enable default behavior, you must include \"deploy/\" in the argument list (default [deploy])\r\n --operator-name string Operator name to use while generating CSV\r\n --update-crds Update CRD manifests in deploy/{operator-name}/{csv-version} the using latest API's\r\n\r\nGlobal Flags:\r\n --verbose Enable verbose logging" + ] + } + }, + "metadata": { + "tags": [ + "operator-framework", + "incubating", + "app-definition", + "kind-feature", + "size-xl", + "do-not-merge-hold", + "needs-rebase", + "olm-integration" + ], + "category": "workloads", + "cncfProjects": [ + "operator-framework" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/operator-framework/operator-sdk/pull/2249", + "sourceRepo": "operator-framework/operator-sdk", + "reactions": 2, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:46:47.088Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/operator-framework/operator-framework-782-allow-users-to-use-multistage-docker-builds-based-on-dock.json b/solutions/cncf-generated/operator-framework/operator-framework-782-allow-users-to-use-multistage-docker-builds-based-on-dock.json new file mode 100644 index 00000000..29c5d66f --- /dev/null +++ b/solutions/cncf-generated/operator-framework/operator-framework-782-allow-users-to-use-multistage-docker-builds-based-on-dock.json @@ -0,0 +1,46 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:46:49.908Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "operator-framework: Allow users to use multistage Docker builds based on Docker version", + "description": "**Description of the change:** if docker version >= 17.05 is present, allow users to generate a multistage Dockerfile `build/multistage.Dockerfile` using `operator-sdk build --gen-multistage` and use it in building. 
The SDK will detect whether both docker version >=17.05 is available and a multistage Dockerfile is present, and if the former is true but not the latter will generate a warning to upgrade using `--gen-multistage`. The warning directs users to move rename `multistage.Dockerfile` to `Dockerfile` to avoid the warning in the future. Multistage Dockerfiles are used in tests too.\n\n**Motivation for the change:** some users might not be restricted by RHEL docker versioning, so they should be able to build Go binaries in a consistent environment. The only way to do so currently is with multistage builds. We should push users with docker version >= 17.05 towards using a multistage build with a warning, but not necessarily require it. This solution creates multistage Dockerfiles for ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "While this PR passes CI, multistage builds cause the memcached e2e test to run for 400 seconds, which normally run in 250 seconds. I tested these changes locally and the result is 220 vs 200 seconds for multistage and non-multistage, respectively. With an increased timeout, the total build time for Go e2e CI with these changes is ~17 minutes, ~3 minutes more than non-multistage. I originally thought this was a buildah issue but it appears to affect both buildah and docker multistage builds in travis VM's. 
Locally there are no issues.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "operator-framework", + "incubating", + "app-definition", + "kind-feature", + "size-xxl", + "do-not-merge-hold", + "needs-rebase" + ], + "category": "workloads", + "cncfProjects": [ + "operator-framework" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "sourceIssue": "https://github.com/operator-framework/operator-sdk/pull/782", + "sourceRepo": "operator-framework/operator-sdk", + "reactions": 1, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:46:49.908Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/ovn-kubernetes/ovn-kubernetes-2136-implement-externaltrafficpolicy-feature.json b/solutions/cncf-generated/ovn-kubernetes/ovn-kubernetes-2136-implement-externaltrafficpolicy-feature.json new file mode 100644 index 00000000..af72f517 --- /dev/null +++ b/solutions/cncf-generated/ovn-kubernetes/ovn-kubernetes-2136-implement-externaltrafficpolicy-feature.json @@ -0,0 +1,49 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:50:17.316Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "ovn-kubernetes: Implement externalTrafficPolicy Feature ", + "description": "fixes #1951 \n\nThis PR implements the the k8's externalTrafficPolicy Feature which is further described [here](https://www.asykim.com/blog/deep-dive-into-kubernetes-external-traffic-policies)\n\nThis feature is implemented only for SGW mode\n\nIt adds new Gateway Router load-balancers, explicitly formed to ignore the GWR option `options:lb_force_snat_ip=router_ip` using a new OVN feature `options:lb_skip_snat=true` which is in the [following 
patch](http://patchwork.ozlabs.org/project/ovn/patch/327cc4c2e4f954a279dca02bad89682cb6eec7c9.1617288043.git.lorenzo.bianconi@redhat.com/)\n\nAlso it ensures that when externalTrafficPolicy=`Local` It adds only the node-local endpoints to the new Loadbalancers to ensure that traffic is only forwarded to local endpoints \n\nIt is currently WIP since it requires a custom OVN patch that allows the `options:lb_force_snat_ip=router_ip` to be applied on a per LB level. \n\nLocal manual testing can be seen [here](https://gist.github.com/astoycos/b7f3690204f0114301", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Another thing that just came into my mind is that you need to consider the new loadbalancers when removing the vips (`deleteVIPsFromNonIdlingOVNBalancers` after rebase)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "BFD e2e multiple external gateway validation Should validate TCP/UDP connectivity to multiple external gateways for a UDP / TCP scenario IPV6 udp", + "[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart exec hook properly [NodeConformance] [Conformance]", + "Jul 28 20:00:18.865: FAIL: All nodes should be ready after test, Get \"https://10.1.0.252:11337/api/v1/nodes\": dial tcp 10.1.0.252:11337: connect: no route to host" + ] + } + }, + "metadata": { + "tags": [ + "ovn-kubernetes", + "sandbox", + "orchestration" + ], + "category": "troubleshooting", + "cncfProjects": [ + "ovn-kubernetes" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "advanced", + "sourceIssue": "https://github.com/ovn-kubernetes/ovn-kubernetes/pull/2136", + "sourceRepo": "ovn-kubernetes/ovn-kubernetes", + "reactions": 1, + "comments": 31 + }, + "security": { + "scannedAt": "2026-02-27T17:50:17.316Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git 
a/solutions/cncf-generated/ovn-kubernetes/ovn-kubernetes-4210-fix-egressip-initialization-on-ipv6-lgw-clusters.json b/solutions/cncf-generated/ovn-kubernetes/ovn-kubernetes-4210-fix-egressip-initialization-on-ipv6-lgw-clusters.json new file mode 100644 index 00000000..1e334ee4 --- /dev/null +++ b/solutions/cncf-generated/ovn-kubernetes/ovn-kubernetes-4210-fix-egressip-initialization-on-ipv6-lgw-clusters.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:50:18.744Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "ovn-kubernetes: Fix EgressIP initialization on IPv6 LGW clusters", + "description": "ovnkube-node fails here in ipv6 lgw clusters: https://github.com/ovn-org/ovn-kubernetes/blob/e5965ded4af0f0f6eb14cb7c2ada0713137ba4d5/go-controller/pkg/node/controllers/egressip/egressip.go#L287-L296\n```\novnkube.go:136] failed to run ovnkube: failed to start node network controller: failed to start default node network controller: failed to run egress IP controller: failed to create IP rule for node IPs: file exists \n```\n\nThis fails because in the current code the IP rule being added is always for ipv4.\nFix this by adding the rule with the appropriate family :family:", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "[![Coverage Status](https://coveralls.io/builds/66235487/badge)](https://coveralls.io/builds/66235487)\n\ncoverage: 52.389% (-0.03%) from 52.416%\nwhen pulling **df2478631ec12dc4ccbb6cc3d91127a3361d4aa9 on kyrtapz:lgw_v6_eip_multinic_fix**\ninto **95c7a5eed5b6a3388b9d832dfdc9391d589b02b4 on ovn-org:master**.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "ovnkube.go:136] failed to run ovnkube: failed to start node network controller: failed to start default node network controller: failed to run egress IP controller: failed to create IP rule 
for node IPs: file exists", + "2024-03-12T15:29:08.7809802Z --- FAIL: TestNetworkPolicyV2Conformance/AdminNetworkPolicyIngressUDP (6.99s)\r\n2024-03-12T15:29:08.7812418Z --- PASS: TestNetworkPolicyV2Conformance/AdminNetworkPolicyIngressUDP/Should_support_an_'allow-ingress'_policy_for_UDP_protocol;_ensure_rule_ordering_is_respected (0.16s)\r\n2024-03-12T15:29:08.7815584Z --- PASS: TestNetworkPolicyV2Conformance/AdminNetworkPolicyIngressUDP/Should_support_an_'allow-ingress'_policy_for_UDP_protocol_at_the_specified_port (3.15s)\r\n2024-03-12T15:29:08.7818731Z --- FAIL: TestNetworkPolicyV2Conformance/AdminNetworkPolicyIngressUDP/Should_support_an_'deny-ingress'_policy_for_UDP_protocol;_ensure_rule_ordering_is_respected (0.16s)\r\n2024-03-12T15:29:08.7821885Z --- PASS: TestNetworkPolicyV2Conformance/AdminNetworkPolicyIngressUDP/Should_support_a_'deny-ingress'_policy_for_UDP_protocol_at_the_specified_port (3.14s)\r\n2024-03-12T15:29:08.7825145Z --- PASS: TestNetworkPolicyV2Conformance/AdminNetworkPolicyIngressUDP/Should_support_an_'pass-ingress'_policy_for_UDP_protocol;_ensure_rule_ordering_is_respected (0.18s)\r\n2024-03-12T15:29:08.7828267Z --- PASS: TestNetworkPolicyV2Conformance/AdminNetworkPolicyIngressUDP/Should_support_a_'pass-ingress'_policy_for_UDP_protocol_at_the_specified_port (0.18s)" + ] + } + }, + "metadata": { + "tags": [ + "ovn-kubernetes", + "sandbox", + "orchestration", + "kind-bug", + "feature-egress-ip" + ], + "category": "troubleshooting", + "cncfProjects": [ + "ovn-kubernetes" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/ovn-kubernetes/ovn-kubernetes/pull/4210", + "sourceRepo": "ovn-kubernetes/ovn-kubernetes", + "reactions": 1, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:50:18.744Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git 
a/solutions/cncf-generated/perses/perses-2887-enhancement-timezone-selector-for-dashboard.json b/solutions/cncf-generated/perses/perses-2887-enhancement-timezone-selector-for-dashboard.json new file mode 100644 index 00000000..9c916540 --- /dev/null +++ b/solutions/cncf-generated/perses/perses-2887-enhancement-timezone-selector-for-dashboard.json @@ -0,0 +1,42 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:19.072Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "perses: [ENHANCEMENT] Timezone selector for dashboard", + "description": "# Description\n\nFixes: #2843 \n\n# Screenshots\n\nhttps://github.com/user-attachments/assets/ac0603d5-4da4-4574-ab46-7fe575960757\n\n# Checklist\n\n- [x] Pull request has a descriptive title and context useful to a reviewer.\n- [x] Pull request title follows the `[] ` naming convention using one of the\n following `catalog_entry` values: `FEATURE`, `ENHANCEMENT`, `BUGFIX`, `BREAKINGCHANGE`, `DOC`,`IGNORE`.\n- [x] All commits have [DCO signoffs](https://github.com/probot/dco#how-it-works).\n\n## UI Changes\n\n- [x] Changes that impact the UI include screenshots and/or screencasts of the relevant changes.\n- [x] Code follows the [UI guidelines](https://github.com/perses/perses/blob/main/ui/ui-guidelines.md).\n- [x] Visual tests are stable and unlikely to be flaky.\n See [e2e](https://github.com/perses/perses/tree/main/ui/e2e#visual-tests) docs for more details. Common issues include:\n - Is the data inconsistent? You need to mock API requests.\n - Does the time change? You ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Nice feature. But I don't think it's the right place to put it, this right zone is becoming more and more huge and space for variable is becoming less and less. 
We need to think a better place to put it :/\n![image](https://github.com/user-attachments/assets/87251842-cca9-49f6-b1bc-ea33bcb1854d)\n\nOn mobile, some button are now hidden.\n![image](https://github.com/user-attachments/assets/202ea64c-283a-45be-80de-3e0ba94235e6)\n\nUnderscore _ should be replace by spaces in city name", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "perses", + "sandbox", + "observability" + ], + "category": "observability", + "cncfProjects": [ + "perses" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/perses/perses/pull/2887", + "sourceRepo": "perses/perses", + "reactions": 3, + "comments": 17 + }, + "security": { + "scannedAt": "2026-02-27T17:49:19.072Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/pipecd/pipecd-1647-add-a-cloud-provider-for-ecs.json b/solutions/cncf-generated/pipecd/pipecd-1647-add-a-cloud-provider-for-ecs.json new file mode 100644 index 00000000..96a30b7f --- /dev/null +++ b/solutions/cncf-generated/pipecd/pipecd-1647-add-a-cloud-provider-for-ecs.json @@ -0,0 +1,50 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:23.086Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "pipecd: Add a cloud provider for ECS", + "description": "**What this PR does / why we need it**:\n\n**Which issue(s) this PR fixes**:\n\nFixes #1665 \n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\nNONE\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "[![COVERAGE](https://img.shields.io/static/v1?label=Kapetanios&message=COVERAGE&color=success&style=flat)](https://kapetanios.dev/docs/plugins/coverage)\n\nCode coverage for **golang** is `32.35%`. 
This pull request **increases** coverage by `0.02%`.\n\n| File | Function | Base | Head | Diff |\n|-----|-----|:-----:|:-----:|:-----:|\n| [pkg/app/piped/logpersister/stagelogpersister.go](https://github.com/pipe-cd/pipe/blob/f6d01d21c9be70d525a462fae41395842e051c0b/pkg/app/piped/logpersister/stagelogpersister.go) | stageLogPersister.flushFromLastCheckpoint | `50.00%` | `61.11%` | `+11.11%` |", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "\n[![COVERAGE](https://img.shields.io/static/v1?label=Kapetanios&message=COVERAGE&color=success&style=flat)](https://kapetanios.dev/docs/plugins/coverage)\n\nCode coverage for **golang** is `32.35%`. This pull request **increases** coverage by `0.02%`.\n\n| File | Function | Base | Head | Diff |\n|-----|-----|:-----:|:-----:|:-----:|\n| [pkg/app/piped/logpersister/stagelogpersister.go](https://github.com/pipe-cd/pipe/blob/f6d01d21c9be70d525a462fae41395842e051c0b/pkg/app/piped/logpersister/stagelogpersister.go) | stageLogPersister.flushFromLastCheckpoint | `50.00%` | `61.11%` | `+11.11%` |\n\n\n\n[![GO_LINTER](https://img.shields.io/static/v1?label=Kapetanios&message=GO_LINTER&color=red&style=flat)](https://kapetanios.dev/docs/plugins/golinter)\n\nThe golinter build is completed with **FAILURE**. The build will be triggered again when you push any other commits. Or you can trigger it manually by `/golinter trigger` command right now.\n\nYou can check the build log from [here](https://kapetanios.io/builds/5942cea5-624c-425f-9586-6d4ad45d00e8).\n\n\n[![COVERAGE](https://img.shields.io/static/v1?label=Kapetanios&message=COVERAGE&color=success&style=flat)](https://kapetanios.dev/docs/plugins/coverage)\n\nCode coverage for **golang** is `32.06%`. 
This pull request does not change code coverage.\n\n\n\n\n[![COVERAGE](https://img.shields.io/static/v1?label=Kapetanios&message=COVERAGE&color=success&style=flat)](https://kapetanios.dev/docs/plugins/coverage)\n\nCode coverage for **golang** is `32.06%`. This pull request does not change code coverage.\n\n\n\n\n[![COVERAGE](https://img.shields.io/static/v1?label=Kapetanios&message=COVERAGE&color=success&style=flat)](https://kapetanios.dev/docs/plugins/coverage)\n\nCode coverage for **golang** is `32.28%`. This pull request does not change code coverage.\n\n\n\n\n[![COVERAGE](https://img.shields.io/static/v1?label=Kapetanios&message=COVERAGE&color=success&style=flat)](https://kapetanios.dev/docs/plugins/coverage)\n\nCode coverage for **golang** is `32.28%`. This pull request does not change code coverage.\n\n\n\n\n[![COVERAGE](https://img.shields.io/static/v1?label=Kapetanios&message=COVERAGE&color=success&style=flat)](https://kapetanios.dev/docs/plugins/coverage)\n\nCode coverage for **golang** is `32.28%`. This pull request does not change code coverage.\n\n\n\n\n[![COVERAGE](https://img.shields.io/static/v1?label=Kapetanios&message=COVERAGE&color=success&style=flat)](https://kapetanios.dev/docs/plugins/coverage)\n\nCode coverage for **golang** is `32.28%`. This pull request does not change code coverage.\n\n\n\n\n[![COVERAGE](https://img.shields.io/static/v1?label=Kapetanios&message=COVERAGE&color=success&style=flat)](https://kapetanios.dev/docs/plugins/coverage)\n\nCode coverage for **golang** is `32.28%`. This pull request does not change code coverage.\n\n\n\n\n[![GO_LINTER](https://img.shields.io/static/v1?label=Kapetanios&message=GO_LINTER&color=yellow&style=flat)](https://kapetanios.dev/docs/plugins/golinter)\n\nThe following files are not gofmt-ed. By commenting `/golinter fmt`, the formatted one will be appended to this pull request automatically.\n\n
pkg/app/piped/cloudprovider/ecs/ecs.go" + ] + } + }, + "metadata": { + "tags": [ + "pipecd", + "sandbox", + "app-definition", + "release-note-none", + "approved", + "size-l", + "area-piped", + "area-build" + ], + "category": "workloads", + "cncfProjects": [ + "pipecd" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/pipe-cd/pipecd/pull/1647", + "sourceRepo": "pipe-cd/pipecd", + "reactions": 3, + "comments": 26 + }, + "security": { + "scannedAt": "2026-02-27T17:49:23.086Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/pipecd/pipecd-2772-add-a-piped-component-that-watches-app-configs.json b/solutions/cncf-generated/pipecd/pipecd-2772-add-a-piped-component-that-watches-app-configs.json new file mode 100644 index 00000000..a44425dc --- /dev/null +++ b/solutions/cncf-generated/pipecd/pipecd-2772-add-a-piped-component-that-watches-app-configs.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:24.254Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "pipecd: Add a piped component that watches app configs", + "description": "**What this PR does / why we need it**:\nThis PR adds a new Piped component that primarily watches two types of Applications that is:\n- already registered\n- unregistered\n\nFor the first one (already registered):\n- Piped sends only configs that have been changed since then\n- Contol-plane updates Applications stored in Datastore\n\nFor the second one (unregistered):\n- Piped sends all the latest configs every time whatever the repo is changed or not. 
The reason is the cache in control-plane is likely to be removed, and configs in Git is also likely to be removed\n- Control-plane caches them for suggesting when you attempt to add a new Application\n\nOnce this gets merged, I will drill into the detailed implementation.\n\n**Which issue(s) this PR fixes**:\n\nFixes https://github.com/pipe-cd/pipe/issues/2755\nRef https://github.com/pipe-cd/pipe/pull/2772\nRef https://github.com/pipe-cd/pipe/issues/2750\n\n**Does this PR introduce a user-facing change?**:\n\n```release-note\nNONE\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "[![COVERAGE](https://img.shields.io/static/v1?label=Kapetanios&message=COVERAGE&color=yellow&style=flat)](https://kapetanios.dev/docs/plugins/coverage)\n\nCode coverage for **golang** is `32.08%`. This pull request **decreases** coverage by `-0.03%`.\n\n| File | Function | Base | Head | Diff |\n|-----|-----|:-----:|:-----:|:-----:|\n| [pkg/app/api/grpcapi/piped_api.go](https://github.com/pipe-cd/pipe/blob/9d12e0505aa428039e2778ce4ad275685bb98b84/pkg/app/api/grpcapi/piped_api.go) | PipedAPI.UpdateApplicationConfigurations | `--` | `0.00%` | `+0.00%` |\n| [pkg/app/api/grpcapi/piped_api.go](https://github.com/pipe-cd/pipe/blob/9d12e0505aa428039e2778ce4ad275685bb98b84/pkg/app/api/grpcapi/piped_api.go) | PipedAPI.PutUnregisteredApplicationConfigurations | `--` | `0.00%` | `+0.00%` |\n| [pkg/app/piped/cloudprovider/terraform/terraform.go](https://github.com/pipe-cd/pipe/blob/9d12e0505aa428039e2778ce4ad275685bb98b84/pkg/app/piped/cloudprovider/terraform/terraform.go) | WithAdditionalEnvs | `--` | `0.00%` | `+0.00%` |\n| [pkg/app/piped/trigger/determiner.go](https://github.com/pipe-cd/pipe/blob/9d12e0505aa428039e2778ce4ad275685bb98b84/pkg/app/piped/trigger/determiner.go) | Determiner.shouldTriggerOnCommit | `--` | `0.00%` | `+0.00%` |\n| 
[pkg/model/application.go](https://github.com/pipe-cd/pipe/blob/9d12e0505aa428039e2778ce4ad275685bb98b84/pkg/model/application.go) | Application.ContainLabels | `--` | `77.78%` | `+77.78%` |\n| [pkg/model/deployment.go](https://github.com/pipe-cd/pipe/blob/9d12", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "pipecd", + "sandbox", + "app-definition", + "release-note-none", + "approved", + "area-api", + "area-piped", + "size-xl" + ], + "category": "workloads", + "cncfProjects": [ + "pipecd" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/pipe-cd/pipecd/pull/2772", + "sourceRepo": "pipe-cd/pipecd", + "reactions": 3, + "comments": 32 + }, + "security": { + "scannedAt": "2026-02-27T17:49:24.254Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/pipecd/pipecd-4540-feat-ecs-enable-selection-of-listener-rules.json b/solutions/cncf-generated/pipecd/pipecd-4540-feat-ecs-enable-selection-of-listener-rules.json new file mode 100644 index 00000000..6be9a30e --- /dev/null +++ b/solutions/cncf-generated/pipecd/pipecd-4540-feat-ecs-enable-selection-of-listener-rules.json @@ -0,0 +1,53 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:25.316Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "pipecd: feat(ECS): enable selection of listener rules", + "description": "**What this PR does / why we need it**:\n\n**Which issue(s) this PR fixes**:\n\nFixes https://github.com/pipe-cd/pipecd/issues/4530\n\n**Does this PR introduce a user-facing change?**:\n\n- **How are users affected by this change**:\n```\napiVersion: pipecd.dev/v1beta1\nkind: ECSApp\ninput:\n taskDefinitionFile: taskdef.yaml\n serviceDefinitionFile: servicedef.yaml\n targetGroups:\n primary:\n canary:\n```\n- **Is this 
breaking change**: no\n- **How to migrate (if breaking change)**:", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@moko-poi Don't worry about that. We can approve and ignore that 👌", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "apiVersion: pipecd.dev/v1beta1\r\nkind: ECSApp\r\ninput:\r\n taskDefinitionFile: taskdef.yaml\r\n serviceDefinitionFile: servicedef.yaml\r\n targetGroups:\r\n primary:\r\n canary:", + "| [Files](https://app.codecov.io/gh/pipe-cd/pipecd/pull/4540?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pipe-cd) | Coverage Δ | |\n|---|---|---|\n| [...g/app/piped/platformprovider/ecs/listener\\_rules.go](https://app.codecov.io/gh/pipe-cd/pipecd/pull/4540?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pipe-cd#diff-cGtnL2FwcC9waXBlZC9wbGF0Zm9ybXByb3ZpZGVyL2Vjcy9saXN0ZW5lcl9ydWxlcy5nbw==) | `100.00% <100.00%> (ø)` | |\n| [pkg/config/application\\_ecs.go](https://app.codecov.io/gh/pipe-cd/pipecd/pull/4540?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pipe-cd#diff-cGtnL2NvbmZpZy9hcHBsaWNhdGlvbl9lY3MuZ28=) | `9.52% <ø> (ø)` | |\n| [pkg/config/application\\_kubernetes.go](https://app.codecov.io/gh/pipe-cd/pipecd/pull/4540?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pipe-cd#diff-cGtnL2NvbmZpZy9hcHBsaWNhdGlvbl9rdWJlcm5ldGVzLmdv) | `8.00% <ø> (ø)` | |\n| [pkg/app/piped/platformprovider/ecs/ecs.go](https://app.codecov.io/gh/pipe-cd/pipecd/pull/4540?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pipe-cd#diff-cGtnL2FwcC9waXBlZC9wbGF0Zm9ybXByb3ZpZGVyL2Vjcy9lY3MuZ28=) | `0.00% <0.00%> (ø)` | |\n| 
[pkg/config/piped.go](https://app.codecov.io/gh/pipe-cd/pipecd/pull/4540?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pipe-cd#diff-cGtnL2NvbmZpZy9waXBlZC5nbw==) | `56.43% <37.50%> (+0.29%)` | :arrow_up: |\n| [...g/app/piped/platformprovider/kubernetes/kubectl.go](https://app.codecov.io/gh/pipe-cd/pipecd/pull/4540?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pipe-cd#diff-cGtnL2FwcC9waXBlZC9wbGF0Zm9ybXByb3ZpZGVyL2t1YmVybmV0ZXMva3ViZWN0bC5nbw==) | `0.00% <0.00%> (ø)` | |\n| [...g/app/piped/platformprovider/kubernetes/applier.go](https://app.codecov.io/gh/pipe-cd/pipecd/pull/4540?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pipe-cd#diff-cGtnL2FwcC9waXBlZC9wbGF0Zm9ybXByb3ZpZGVyL2t1YmVybmV0ZXMvYXBwbGllci5nbw==) | `0.00% <0.00%> (ø)` | |\n| [pkg/app/piped/platformprovider/ecs/client.go](https://app.codecov.io/gh/pipe-cd/pipecd/pull/4540?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pipe-cd#diff-cGtnL2FwcC9waXBlZC9wbGF0Zm9ybXByb3ZpZGVyL2Vjcy9jbGllbnQuZ28=) | `0.00% <0.00%> (ø)` | |\n\n... and [4 files with indirect coverage changes](https://app.codecov.io/gh/pipe-cd/pipecd/pull/4540/indirect-changes?src=pr&el=tree-more&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pipe-cd)\n\n\n
\n\n[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/pipe-cd/pipecd/pull/4540?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pipe-cd). \n:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pipe-cd).\n\n@kentakozuka @khanhtc1202 \r\nThe CI has failed due to codecov/patch.\r\n\r\nThe coverage drop is due to missing test files. Adding tests might be beyond this PR's scope. \r\nHow should we address this?\r\n\r\nBest regards,\n@moko-poi Don't worry about that. We can approve and ignore that 👌\n@moko-poi Thanks so much for your contribution 🙏 There are 2 points that concern me in this current implementation.\r\n\r\nFirst, not sure why we need to nest the configuration as `listernerRules.rules`, `listernerRules` directly contains rules make more sense to me." 
+ ] + } + }, + "metadata": { + "tags": [ + "pipecd", + "sandbox", + "app-definition", + "area-manifests", + "area-web", + "area-build", + "area-go", + "area-tool" + ], + "category": "workloads", + "cncfProjects": [ + "pipecd" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/pipe-cd/pipecd/pull/4540", + "sourceRepo": "pipe-cd/pipecd", + "reactions": 3, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:49:25.316Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/pipecd/pipecd-4571-modified-to-use-git-with-pat.json b/solutions/cncf-generated/pipecd/pipecd-4571-modified-to-use-git-with-pat.json new file mode 100644 index 00000000..9fb4a373 --- /dev/null +++ b/solutions/cncf-generated/pipecd/pipecd-4571-modified-to-use-git-with-pat.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:27.528Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "pipecd: Modified to use Git with PAT", + "description": "**What this PR does / why we need it**: Modified to use Personal Access Token since currently only SSH can control the Git repository.\n\n**Which issue(s) this PR fixes**:\n\nFixes #4106 \n\n**Does this PR introduce a user-facing change?**: Yes\n\n- **How are users affected by this change**: Be able to use Personal Access Token setting like this:\n\n```\napiVersion: pipecd.dev/v1beta1\nkind: Piped\nspec:\n git:\n personalAccessToken:\n userName: \n userToken: \n```\n\n- **Is this breaking change**: No\n- **How to migrate (if breaking change)**:", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@sZma5a Sorry for closing this PR regardless of your contribution 🙏 I reopened it.\nIt is because our GitHub actions automatically close PRs when keeping open with no activity 🙏 \n\nHow is your current 
situation? (e.g. Is this PR ready for Review?)\nIf you have any trouble, please feel free to ping me :)", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "apiVersion: pipecd.dev/v1beta1\r\nkind: Piped\r\nspec:\r\n git:\r\n personalAccessToken:\r\n userName: \r\n userToken: " + ] + } + }, + "metadata": { + "tags": [ + "pipecd", + "sandbox", + "app-definition", + "area-go", + "cherry-pick", + "v0-48-9-rc1" + ], + "category": "workloads", + "cncfProjects": [ + "pipecd" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/pipe-cd/pipecd/pull/4571", + "sourceRepo": "pipe-cd/pipecd", + "reactions": 3, + "comments": 18 + }, + "security": { + "scannedAt": "2026-02-27T17:49:27.528Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/pipecd/pipecd-4739-use-repository-settings-from-piped-config.json b/solutions/cncf-generated/pipecd/pipecd-4739-use-repository-settings-from-piped-config.json new file mode 100644 index 00000000..57305578 --- /dev/null +++ b/solutions/cncf-generated/pipecd/pipecd-4739-use-repository-settings-from-piped-config.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:21.819Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "pipecd: Use repository settings from piped config", + "description": "**What this PR does / why we need it**:\n\n- planner/scheduler should follow repository settings in piped config.\n- plan preview builder should not check `Remote` and `Branch` because repository settings in app config metadata are not reliable (they can be outdated by modifying piped config)\n\n**Which issue(s) this PR fixes**:\n\nFixes #4618\n\n**Does this PR introduce a user-facing change?**:\n\n- **How are users affected by this change**: None\n- **Is this breaking 
change**: No\n- **How to migrate (if breaking change)**: None", + "type": "upgrade", + "status": "completed", + "resolution": { + "summary": "**What this PR does / why we need it**:\n\n- This PR stops staling PRs with the 'not-auto-close' tag.\n\n- Background: \n - https://github.com/pipe-cd/pipecd/pull/4739 was closed unintentionally by the stale action although the `not-auto-close` tag was assigned.\n - Cause: We needed `exempt-pr-labels`, not only `exempt-issue-labels.`\n - cf. https://github.com/actions/stale?tab=readme-ov-file#list-of-input-options", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "pipecd", + "sandbox", + "app-definition", + "area-go", + "change-category-breaking-change", + "not-auto-close" + ], + "category": "workloads", + "cncfProjects": [ + "pipecd" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/pipe-cd/pipecd/pull/4739", + "sourceRepo": "pipe-cd/pipecd", + "reactions": 5, + "comments": 11 + }, + "security": { + "scannedAt": "2026-02-27T17:49:21.819Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/pipecd/pipecd-4903-feat-add-support-mention-slack-groups-for-notificationreceiverslack-.json b/solutions/cncf-generated/pipecd/pipecd-4903-feat-add-support-mention-slack-groups-for-notificationreceiverslack-.json new file mode 100644 index 00000000..7c8a1578 --- /dev/null +++ b/solutions/cncf-generated/pipecd/pipecd-4903-feat-add-support-mention-slack-groups-for-notificationreceiverslack-.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:26.401Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "pipecd: feat: add support mention slack groups for NotificationReceiverSlack and NotificationMention", + "description": "**What this PR 
does / why we need it**: \nas mentioned from the issue #4730, currently PipeCD [pipeCD is supporting individual user mention](https://pipecd.dev/docs-v0.47.x/user-guide/managing-piped/configuration-reference/#notificationreceiverslack) \n\nAccording on difference syntax that slack support mention in message \n- [user](https://api.slack.com/reference/surfaces/formatting#mentioning-users) \n- [group](https://api.slack.com/reference/surfaces/formatting#mentioning-groups)\n\nThis PR will have feature to add field `mentionedGroups` for [NotificationReceiverSlack](https://pipecd.dev/docs-v0.47.x/user-guide/managing-piped/configuration-reference/#notificationreceiverslack) and `slackGroups` for [NotificationMention](https://pipecd.dev/docs-v0.47.x/user-guide/configuration-reference/#notificationmention)\n\n**Which issue(s) this PR fixes**:\n\nFixes #4730 \n\n**Does this PR introduce a user-facing change?**: N/A\n\n- **How are users affected by this change**: N/A\n- **Is this breaking change**:", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Ping @minhquang4334 👋", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "WDYT? @hungran @khanhtc1202 @ffjlabo @t-kikuc @minhquang4334\n> I checked the behavior, and this PR worked as expected! Thank you!\r\n> \r\n> By the way, I have an idea about duplicated codes. How about defining the wrapper codes for `FindSlackUsers` and `FindSlackGroups`?\r\n> \r\n> I think the codes are below, and I think these codes are defined in the config package.\r\n> \r\n>", + "> \r\n> WDYT? 
@hungran @khanhtc1202 @ffjlabo @t-kikuc @minhquang4334\r\n\r\n@Warashi thank you very much for checking, \r\nfor me avoid duplicate code in this case is greats, but better to see more view point, advice from team\nThank you for asking.\r\n\r\n@Warashi 's idea seems to have two purposes.\r\n- to unify the process to get users and groups in the same method\r\n- to unify the above processes on the scheduler, planner, and executor in the same method\r\n\r\nI totally agree with the idea, but I am concerned about the second purpose.\r\nFor me, creating the interface `Getter` is a little bit overly complex because it only abstracts the `metadataStore.Get`.\r\n\r\nSo how about solving the first one at the moment? \r\nIt means to create `getApplicationNotificationMentions` on the scheduler, planner, executor like this.\r\n\r\nWDYT? @hungran @Warashi @khanhtc1202 @t-kikuc @minhquang4334" + ] + } + }, + "metadata": { + "tags": [ + "pipecd", + "sandbox", + "app-definition", + "area-web", + "area-go" + ], + "category": "workloads", + "cncfProjects": [ + "pipecd" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "sourceIssue": "https://github.com/pipe-cd/pipecd/pull/4903", + "sourceRepo": "pipe-cd/pipecd", + "reactions": 3, + "comments": 12 + }, + "security": { + "scannedAt": "2026-02-27T17:49:26.401Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/podman-container-tools/podman-container-tools-12611-add-ip6-flag-to-podman-create-run.json b/solutions/cncf-generated/podman-container-tools/podman-container-tools-12611-add-ip6-flag-to-podman-create-run.json new file mode 100644 index 00000000..628dc962 --- /dev/null +++ b/solutions/cncf-generated/podman-container-tools/podman-container-tools-12611-add-ip6-flag-to-podman-create-run.json @@ -0,0 +1,47 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:39.111Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": 
"auto-generated", + "mission": { + "title": "podman-container-tools: add --ip6 flag to podman create/run", + "description": "#### What this PR does / why we need it:\n\nAdd the --ipv6 flag to podman create/run and pod create. We support the\n`--network name:ip6=` syntax now but for docker compat we should also\nsupport the --ip6 flag.\nNote that there is no validation if the ip is actually a v6 or v4 address\nbecause the backend does not care either.\n\n#### How to verify it\n\n#### Which issue(s) this PR fixes:\n\nFixes #7511\n\n#### Special notes for your reviewer:", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Between podman-create, run, and pod-create. The big difference\nis that I changed 'IP' to 'IPv4' in podman-pod-create, I believe\nthat was an oversight in #12611.\n\nSigned-off-by: Ed Santiago \n```release-note\nmore man-page deduplication\n```", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "podman-container-tools", + "sandbox", + "app-definition", + "lgtm", + "approved", + "locked---please-file-new-issue-pr" + ], + "category": "workloads", + "cncfProjects": [ + "podman-container-tools" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "beginner", + "sourceIssue": "https://github.com/containers/podman/pull/12611", + "sourceRepo": "containers/podman", + "reactions": 4, + "comments": 13 + }, + "security": { + "scannedAt": "2026-02-27T17:49:39.111Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/podman-container-tools/podman-container-tools-13490-pod-logs-enhancements-option-to-color-logs.json b/solutions/cncf-generated/podman-container-tools/podman-container-tools-13490-pod-logs-enhancements-option-to-color-logs.json new file mode 100644 index 00000000..9a5a0793 --- /dev/null +++ 
b/solutions/cncf-generated/podman-container-tools/podman-container-tools-13490-pod-logs-enhancements-option-to-color-logs.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:48.306Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "podman-container-tools: pod logs enhancements: option to color logs", + "description": "Created an option to colourise ```pod logs``` with an option ```--color```. You can recreate with the following steps:\n\n1. Create a pod with containers\n```bash\nbin/podman pod create --name=pod_testlogs; bin/podman run --name=testlogs_loop1_1 -d --pod=pod_testlogs busybox /bin/sh -c 'for i in `seq 1 10000`; do echo \"loop1: $i\"; sleep 1; done'; bin/podman run --name=testlogs_loop2_1 -d --pod=pod_testlogs busybox /bin/sh -c 'for i in `seq 1 10000`; do echo \"loop2: $i\"; sleep 3; done';\n```\n\n2. Logs with colour:\n\n```bash\nbin/podman pod logs --tail=10 -f --color pod_testlogs\n```\n\n3. Kill all pods and remove containers:\n\n```bash\nbin/podman kill --all; bin/podman rm --all; bin/podman pod rm --all\n```\n\nCloses #13266", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "Please rebase and force push your commit. 
There should only be one commit.", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ], + "codeSnippets": [ + "bin/podman pod create --name=pod_testlogs; bin/podman run --name=testlogs_loop1_1 -d --pod=pod_testlogs busybox /bin/sh -c 'for i in `seq 1 10000`; do echo \"loop1: $i\"; sleep 1; done'; bin/podman run --name=testlogs_loop2_1 -d --pod=pod_testlogs busybox /bin/sh -c 'for i in `seq 1 10000`; do echo \"loop2: $i\"; sleep 3; done';", + "bin/podman pod logs --tail=10 -f --color pod_testlogs", + "bin/podman kill --all; bin/podman rm --all; bin/podman pod rm --all" + ] + } + }, + "metadata": { + "tags": [ + "podman-container-tools", + "sandbox", + "app-definition", + "lgtm", + "approved", + "locked---please-file-new-issue-pr" + ], + "category": "workloads", + "cncfProjects": [ + "podman-container-tools" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/containers/podman/pull/13490", + "sourceRepo": "containers/podman", + "reactions": 2, + "comments": 14 + }, + "security": { + "scannedAt": "2026-02-27T17:49:48.306Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/podman-container-tools/podman-container-tools-16143-initial-change-to-systemd-to-manage-volume-mounts.json b/solutions/cncf-generated/podman-container-tools/podman-container-tools-16143-initial-change-to-systemd-to-manage-volume-mounts.json new file mode 100644 index 00000000..40dff1f7 --- /dev/null +++ b/solutions/cncf-generated/podman-container-tools/podman-container-tools-16143-initial-change-to-systemd-to-manage-volume-mounts.json @@ -0,0 +1,48 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:41.273Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "podman-container-tools: Initial change to systemd to manage volume mounts", + 
"description": "This commit is the initial change to manage volumemounts via systemd and ignition instead of through SSH commands as part of `podman machine start`.\n\nThis attempts to fix #15976.\n\nKirk's TODO:\n\n- [x] Add More Unit Tests where applicable\n- [ ] Work with maintainers to decide functionality for existing SSH-mounted userdirs (how do we migrate existing users?)\n- [ ] Add comments where applicable in the new functions to clarify why we're adding two systemd files.\n\n#### Does this PR introduce a user-facing change?\n\nThis introduces a behavioral change in order to manage volume mounts on `podman machine` via systemd files via ignition config, which will persist user-defined mounts through the podman-machine updates and reboots without having to explicitly stop/start.\n\n```release-note\nTBD\n```", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "@baude @ashley-cui PTAL", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "podman-container-tools", + "sandbox", + "app-definition", + "do-not-merge-work-in-progress", + "needs-rebase", + "release-note", + "locked---please-file-new-issue-pr" + ], + "category": "workloads", + "cncfProjects": [ + "podman-container-tools" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/containers/podman/pull/16143", + "sourceRepo": "containers/podman", + "reactions": 3, + "comments": 17 + }, + "security": { + "scannedAt": "2026-02-27T17:49:41.273Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/podman-container-tools/podman-container-tools-1751-add-hostname-to-etc-hosts.json b/solutions/cncf-generated/podman-container-tools/podman-container-tools-1751-add-hostname-to-etc-hosts.json new file mode 100644 index 00000000..eda54c39 --- /dev/null +++ 
b/solutions/cncf-generated/podman-container-tools/podman-container-tools-1751-add-hostname-to-etc-hosts.json @@ -0,0 +1,45 @@ +{ + "format": "kc-mission-v1", + "exportedAt": "2026-02-27T17:49:45.630Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "podman-container-tools: Add hostname to /etc/hosts", + "description": "This can fix #1745 \nSigned-off-by: Qi Wang ", + "type": "troubleshoot", + "status": "completed", + "resolution": { + "summary": "/etc/resolv.conf and /etc/hosts should not be created and mounted when the\nnetwork is disabled.\n\nWe should not be calling the network setup and cleanup functions when it is\ndisabled either.\n\nSigned-off-by: Daniel J Walsh ", + "steps": [ + "Review the issue discussion for context", + "Apply the fix from the linked pull request" + ] + } + }, + "metadata": { + "tags": [ + "podman-container-tools", + "sandbox", + "app-definition", + "lgtm", + "approved", + "locked---please-file-new-issue-pr" + ], + "category": "workloads", + "cncfProjects": [ + "podman-container-tools" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "sourceIssue": "https://github.com/containers/podman/pull/1751", + "sourceRepo": "containers/podman", + "reactions": 2, + "comments": 10 + }, + "security": { + "scannedAt": "2026-02-27T17:49:45.631Z", + "scannerVersion": "cncf-gen-1.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/podman-container-tools/podman-container-tools-23391-convert-additional-build-context-paths-on-windows.json b/solutions/cncf-generated/podman-container-tools/podman-container-tools-23391-convert-additional-build-context-paths-on-windows.json new file mode 100644 index 00000000..0f044e11 --- /dev/null +++ b/solutions/cncf-generated/podman-container-tools/podman-container-tools-23391-convert-additional-build-context-paths-on-windows.json @@ -0,0 +1,52 @@ +{ + "format": "kc-mission-v1", + "exportedAt": 
"2026-02-27T17:49:42.693Z", + "exportedBy": "cncf-mission-generator", + "consoleVersion": "auto-generated", + "mission": { + "title": "podman-container-tools: Convert additional build context paths on Windows", + "description": "This PR partially fixes issues with the option `--build-context