diff --git a/search-state.json b/search-state.json new file mode 100644 index 00000000..3ce34662 --- /dev/null +++ b/search-state.json @@ -0,0 +1,179 @@ +{ + "version": 1, + "lastUpdated": "2026-03-02T19:43:54.021Z", + "projects": { + "strimzi/strimzi-kafka-operator": { + "github-issues": { + "lastSearched": "2026-03-02T19:37:57.626Z", + "processedIds": [ + "gh:strimzi/strimzi-kafka-operator#9857", + "gh:strimzi/strimzi-kafka-operator#11210", + "gh:strimzi/strimzi-kafka-operator#9153", + "gh:strimzi/strimzi-kafka-operator#6180" + ], + "cursor": null + }, + "github-discussions": { + "lastSearched": "2026-03-02T19:38:00.103Z", + "processedIds": [], + "cursor": "Y3Vyc29yOnYyOpK0MjAyNS0wMS0wOFQxMzo1MDozMVrOAHb6Yg==" + } + }, + "thanos-io/thanos": { + "github-issues": { + "lastSearched": "2026-03-02T19:38:20.706Z", + "processedIds": [ + "gh:thanos-io/thanos#8110", + "gh:thanos-io/thanos#1952", + "gh:thanos-io/thanos#1906", + "gh:thanos-io/thanos#4141", + "gh:thanos-io/thanos#1268", + "gh:thanos-io/thanos#6816", + "gh:thanos-io/thanos#4292", + "gh:thanos-io/thanos#5408", + "gh:thanos-io/thanos#2138", + "gh:thanos-io/thanos#3166", + "gh:thanos-io/thanos#4934", + "gh:thanos-io/thanos#656" + ], + "cursor": null + }, + "github-discussions": { + "lastSearched": "2026-03-02T19:38:22.613Z", + "processedIds": [], + "cursor": "Y3Vyc29yOnYyOpK0MjAyMi0wOC0wMVQyMDoyMToxN1rOAECSiQ==" + } + }, + "volcano-sh/volcano": { + "github-issues": { + "lastSearched": "2026-03-02T19:38:30.647Z", + "processedIds": [ + "gh:volcano-sh/volcano#2203", + "gh:volcano-sh/volcano#4581", + "gh:volcano-sh/volcano#683", + "gh:volcano-sh/volcano#4767", + "gh:volcano-sh/volcano#4273", + "gh:volcano-sh/volcano#4782", + "gh:volcano-sh/volcano#687" + ], + "cursor": null + }, + "github-discussions": { + "lastSearched": "2026-03-02T19:38:31.039Z", + "processedIds": [], + "cursor": null + } + }, + "wasmCloud/wasmCloud": { + "github-issues": { + "lastSearched": "2026-03-02T19:38:32.108Z", + "processedIds": [], + "cursor": null + }, + "github-discussions": { + "lastSearched": "2026-03-02T19:38:33.860Z", + "processedIds": [], + "cursor": null + } + }, + "aeraki-mesh/aeraki": { + "github-issues": { + "lastSearched": "2026-03-02T19:38:34.763Z", + "processedIds": [], + "cursor": null + }, + "github-discussions": { + "lastSearched": "2026-03-02T19:38:35.478Z", + "processedIds": [], + "cursor": null + } + }, + "project-akri/akri": { + "github-issues": { + "lastSearched": "2026-03-02T19:38:37.552Z", + "processedIds": [ + "gh:project-akri/akri#346" + ], + "cursor": null + }, + "github-discussions": { + "lastSearched": "2026-03-02T19:38:38.401Z", + "processedIds": [], + "cursor": null + } + }, + "antrea-io/antrea": { + "github-issues": { + "lastSearched": "2026-03-02T19:38:44.867Z", + "processedIds": [ + "gh:antrea-io/antrea#2121", + "gh:antrea-io/antrea#1196", + "gh:antrea-io/antrea#5483" + ], + "cursor": null + }, + "github-discussions": { + "lastSearched": "2026-03-02T19:38:45.587Z", + "processedIds": [], + "cursor": null + } + }, + "armadaproject/armada": { + "github-issues": { + "lastSearched": "2026-03-02T19:38:47.507Z", + "processedIds": [ + "gh:armadaproject/armada#2111" + ], + "cursor": null + }, + "github-discussions": { + "lastSearched": "2026-03-02T19:38:48.217Z", + "processedIds": [], + "cursor": null + } + }, + "AthenZ/athenz": { + "github-issues": { + "lastSearched": "2026-03-02T19:38:49.392Z", + "processedIds": [], + "cursor": null + }, + "github-discussions": { + "lastSearched": "2026-03-02T19:38:50.883Z", + "processedIds": [], + 
"cursor": null + } + }, + "runatlantis/atlantis": { + "github-issues": { + "lastSearched": "2026-03-02T19:39:15.224Z", + "processedIds": [ + "gh:runatlantis/atlantis#3607", + "gh:runatlantis/atlantis#1392", + "gh:runatlantis/atlantis#4114", + "gh:runatlantis/atlantis#4193", + "gh:runatlantis/atlantis#4499", + "gh:runatlantis/atlantis#3269", + "gh:runatlantis/atlantis#2261", + "gh:runatlantis/atlantis#1352", + "gh:runatlantis/atlantis#4001", + "gh:runatlantis/atlantis#4978", + "gh:runatlantis/atlantis#5940", + "gh:runatlantis/atlantis#2002", + "gh:runatlantis/atlantis#4229", + "gh:runatlantis/atlantis#3086", + "gh:runatlantis/atlantis#2507", + "gh:runatlantis/atlantis#2055", + "gh:runatlantis/atlantis#4275", + "gh:runatlantis/atlantis#3287" + ], + "cursor": null + }, + "github-discussions": { + "lastSearched": "2026-03-02T19:39:16.796Z", + "processedIds": [], + "cursor": "Y3Vyc29yOnYyOpK0MjAyMi0xMC0xMFQxNTo0MDoyMFrOAEQKfQ==" + } + } + } +} \ No newline at end of file diff --git a/solutions/cncf-generated/alertmanager/alertmanager-3944-feat-allow-nested-details-fields-in-pagerduty.json b/solutions/cncf-generated/alertmanager/alertmanager-3944-feat-allow-nested-details-fields-in-pagerduty.json new file mode 100644 index 00000000..44678340 --- /dev/null +++ b/solutions/cncf-generated/alertmanager/alertmanager-3944-feat-allow-nested-details-fields-in-pagerduty.json @@ -0,0 +1,80 @@ +{ + "version": "kc-mission-v1", + "name": "alertmanager-3944-feat-allow-nested-details-fields-in-pagerduty", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "alertmanager: feat: allow nested details fields in pagerduty", + "description": "This change allows nested key/value pair in pageduty configuration.\n\nEvents API V1 supports an object in `details` field:\nhttps://developer.pagerduty.com/docs/send-v1-event#parameters\n\nEvents API V2 supports an object in `payload.custom_details` field:\nhttps://developer.pagerduty.com/docs/send-alert-event#parameters\n\nThe configuration and message/payload types where changed from `map[string]string` to `map[string]any`.\n\nThe default template is updated to use the new `toJson` function.", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This change allows nested key/value pair in pageduty configuration.\n\nEvents API V1 supports an object in `details` field:\nhttps://developer.pagerduty.com/docs/send-v1-event#parameters\n\nEvents API V2 s" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nfields:\r\n # Components\r\n components: { name: \"Monitoring\" }\r\n # Custom Field TextField\r\n customfield_10001: \"Random text\"\r\n # Custom Field SelectList\r\n customfield_10002: {\"value\": \"red\"}\r\n # Custom Field MultiSelect\r\n customfield_10003: [{\"value\": \"red\"}, {\"value\": \"blue\"}, {\"value\": \"green\"}]\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/prometheus/alertmanager/pull/4083. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "The Jira API requests different fields value types depends on the custom field type itself.\n\nRef. 
https://developer.atlassian.com/server/jira/platform/jira-rest-api-examples/#setting-custom-field-data-for-other-field-types, for example:\n\n```\nfields:\n # Components\n components: { name: \"Monitoring\" }\n # Custom Field TextField\n customfield_10001: \"Random text\"\n # Custom Field SelectList\n customfield_10002: {\"value\": \"red\"}\n # Custom Field MultiSelect\n customfield_10003: [{\"value\": \"red\"}, {\"value\": \"blue\"}, {\"value\": \"green\"}]\n```\n\nWhile jiralert allowed field values with different types as well, it might be problematic in context of the Alertmanager.\n\n* A new dependency `github.com/trivago/tgo` was necessary to cast all keys to strings. Ref. https://github.com/prometheus/alertmanager/pull/3590#discussion_r1816521316\n* Looking at Prometheus Operator, Kubernetes CRDs doesn't [support such flexible types](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#specifying-a-structural-schema), because type is not allowed inside `anyOf`. From Kubernetes point of view, all field values must be declared as string and an additional transformation logic is necessary to convert string values to complex values.\n\nI feel that re-implement the logic from jiralert was design flaw while implement the jira notifier at Alertmanager.\n\nWDYT: @simonpasquier @dswarbrick", + "codeSnippets": [ + "fields:\r\n # Components\r\n components: { name: \"Monitoring\" }\r\n # Custom Field TextField\r\n customfield_10001: \"Random text\"\r\n # Custom Field SelectList\r\n customfield_10002: {\"value\": \"red\"}\r\n # Custom Field MultiSelect\r\n customfield_10003: [{\"value\": \"red\"}, {\"value\": \"blue\"}, {\"value\": \"green\"}]", + "fields:\r\n # Components\r\n components: { name: \"Monitoring\" }\r\n # Custom Field TextField\r\n customfield_10001: \"Random text\"\r\n # Custom Field SelectList\r\n customfield_10002: {\"value\": \"red\"}\r\n # Custom Field MultiSelect\r\n customfield_10003: [{\"value\": \"red\"}, {\"value\": \"blue\"}, {\"value\": \"green\"}]", + "Would this avoid having to embed YAML inside existing YAML? I haven't checked if this will affect existing configurations, but it would be good to understand if this is a better option.\n> Have you considered changing `Details` in the configuration file from `map[string]string` to `map[string]interface{}`?\r\n\r\nThat was the initial approach I though about. It will create a free style format and would require checking the interfaces one by one as they can be either maps or templated strings. I'm open to experiment with it.\nCurious to know... Is this going forward ?\r\nSeems a real blessing having PD Custom Details properly rendered from the Alert Payload. \nI'll update this PR with the suggested `map[string]interface{}` to support nested fields.\nThinking about this more, using a template like `alerts: {{ .Alerts }}` will translate into `alert` key with value of string as templates are only rendered into strings.\r\n\r\nParsing template results into maps or slices is possible but could be error-prone and we'd need a format for it.\r\nAlternative option could be toggles to include specific details in the payload:", + "I updated the PR to use the new `toJson` template function.\r\nAlso we can now support this in both Events API v1 and v2.\r\n\"image\"\r\n\n@siavashs you are a true hero\n@siavashs For v1 integration, this seems to have changed the urls in label/annotations to not render as links in PD. 
PD used to make them clickable hyperlinks when `firing` value was string, but now it's all jsonified and render as plain text.\n@abhijith-db this sounds like a UI issue on PD side.\r\nAs a workaround you can always use a \"non-json\" template on Alertmanager side and it should restore the old format.\n@siavashs Yeah that's a workaround. Only problem is there's no global PD default template to configure. Each receiver has to use the non-json template to linkify urls in annotations\nI see, we'll add a global config for PagerDuty.\n@siavashs Also the deep copy with template was affecting this too. Had to fork and fix on our end to exclude string type. As:", + "Failing testcase that would have passed before this change:" + ] + } + }, + "metadata": { + "tags": [ + "alertmanager", + "graduated", + "observability", + "troubleshoot" + ], + "cncfProjects": [ + "alertmanager" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/prometheus/alertmanager/pull/3944", + "repo": "https://github.com/prometheus/alertmanager", + "pr": "https://github.com/prometheus/alertmanager/pull/4083" + }, + "reactions": 7, + "comments": 15, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with alertmanager installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:36:51.809Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/antrea/antrea-1196-use-specific-kustomize-version-when-generating-manifests.json b/solutions/cncf-generated/antrea/antrea-1196-use-specific-kustomize-version-when-generating-manifests.json new file mode 100644 index 00000000..3c0d5c70 --- /dev/null +++ b/solutions/cncf-generated/antrea/antrea-1196-use-specific-kustomize-version-when-generating-manifests.json @@ -0,0 +1,79 @@ +{ + "version": "kc-mission-v1", + "name": "antrea-1196-use-specific-kustomize-version-when-generating-manifests", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "antrea: Use specific kustomize version when generating manifests", + "description": "Ensure that we use the desired version of kustomize even when there is\nalready an installation of kustomize. This is important because in\nv3.8.0, kustomize stopped using apimachinery by default and switched\nto its own library (kyaml) for K8s resource YAML manipulation. Because\nof this change, the generated YAMLs are different: fields within objects\nmay be ordered differently, and the latest kustomize generally does a\nbetter job dropping empty fields. We set the desired version to v3.8.2.\n\nNo act", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Ensure that we use the desired version of kustomize even when there is\nalready an installation of kustomize. This is important because in\nv3.8.0, kustomize stopped using apimachinery by default and sw" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nGO111MODULE=on go get sigs.k8s.io/kustomize/kustomize/v3\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/antrea-io/antrea/pull/987. 
Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "In v3.8.0, kustomize stopped using apimachinery by default and switched\nto its own library (kyaml) for K8s resource YAML manipulation. Because\nof this change, the generated YAMLs are different: fields within objects\nmay be ordered differently, and the latest kustomize generally does a\nbetter job dropping empty fields. We are switching the min required\nversion of kustomize to 3.8.1 so that Antrea developers can keep working\nwith a recent version of kustomize without CI checks failing. Note that\nwe are using 3.8.1 and not 3.8.0 which has some known issues.\n\nFor new developers which do not have kustomize, the new version will be\ninstalled automatically when running generate-manifest.sh. Others will\nsee an error message about their version of kustomize being too old, and\nthey can update with:\n\n```\nGO111MODULE=on go get sigs.k8s.io/kustomize/kustomize/v3\n```", + "codeSnippets": [ + "GO111MODULE=on go get sigs.k8s.io/kustomize/kustomize/v3", + "GO111MODULE=on go get sigs.k8s.io/kustomize/kustomize/v3" + ] + } + }, + "metadata": { + "tags": [ + "antrea", + "sandbox", + "networking", + "troubleshoot" + ], + "cncfProjects": [ + "antrea" + ], + "targetResourceKinds": [ + "Job" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/antrea-io/antrea/pull/1196", + "repo": "https://github.com/antrea-io/antrea", + "pr": "https://github.com/antrea-io/antrea/pull/987" + }, + "reactions": 1, + "comments": 10, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with antrea installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:38:43.213Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/antrea/antrea-2121-packet-in-rate-limiting-with-of-meter.json b/solutions/cncf-generated/antrea/antrea-2121-packet-in-rate-limiting-with-of-meter.json new file mode 100644 index 00000000..148d5285 --- /dev/null +++ b/solutions/cncf-generated/antrea/antrea-2121-packet-in-rate-limiting-with-of-meter.json @@ -0,0 +1,90 @@ +{ + "version": "kc-mission-v1", + "name": "antrea-2121-packet-in-rate-limiting-with-of-meter", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "antrea: Packet-in rate limiting with OF Meter", + "description": "Add OF meter to implement rate-limiting for packet-in.\n\n1. Add meter entry while initialization.\n2. While building the flow that will trigger packet-in, except traceflow, apply meter entry to it.\n3. Update libOpenflow and ofnet version, because OF meter related commit has been merged into them.\n\nSince windows OVS doesn't support OF meter, we skip OF meter related operations for now. We contacted OVS team to ask for this support and will remove skip after windows OVS support OF meter.\n\nThe benchm", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Add OF meter to implement rate-limiting for packet-in.\n\n1. Add meter entry while initialization.\n2. 
While building the flow that will trigger packet-in, except traceflow, apply meter entry to it.\n3. U" + }, + { + "title": "Add meter entries during initialization: one for Traceflow packets", + "description": "Add meter entries during initialization: one for Traceflow packets" + }, + { + "title": "While building the flow that will trigger packet-in, use the meter", + "description": "While building the flow that will trigger packet-in, use the meter" + }, + { + "title": "Update libOpenflow and ofnet version, to get meter programming", + "description": "Update libOpenflow and ofnet version, to get meter programming" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\ntop - 22:28:03 up 5 days, 5:34, 1 user, load average: 0.03, 0.38, 0.55\r\nTasks: 157 total, 1 running, 156 sleeping, 0 stopped, 0 zombie\r\n%Cpu(s): 4.0 us, 5.8 sy, 0.0 ni, 60.2 id, 0.0 wa, 0.0 hi, 30.0 si, 0.0 st\r\nMiB Mem : 1987.6 total, 587.1 free, 472.5 used, 927.9 buff/cache\r\nMiB Swap: 0.0 total, 0.0 free, 0.0 used. 1333.6 avail Mem\r\n\r\n PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND\r\n2022714 root 10 -10 235600 39464\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/antrea-io/antrea/pull/2215. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Add OF meter to implement rate-limiting for packet-in messages.\n\n1. Add meter entries during initialization: one for Traceflow packets\n and one for other (NetworkPolicy-related) packets.\n2. While building the flow that will trigger packet-in, use the meter\n action.\n3. Update libOpenflow and ofnet version, to get meter programming\n support.\n\nSince Windows OVS doesn't support OF meters, we skip OF meter related\noperations for now. On Linux, for the OVS kernel datapath, kernel\nversion 4.18 is required for meter support (should be 4.15, but is 4.18\nin practice because of an implementation bug): we add a check and\ndisable meters if the Linux kernel is not recent enough. This is to\navoid increasing the minimum kernel version requirement for Antrea, at\nleast for now.\n\nCo-authored-by: Antonin Bas ", + "codeSnippets": [ + "top - 22:28:03 up 5 days, 5:34, 1 user, load average: 0.03, 0.38, 0.55\r\nTasks: 157 total, 1 running, 156 sleeping, 0 stopped, 0 zombie\r\n%Cpu(s): 4.0 us, 5.8 sy, 0.0 ni, 60.2 id, 0.0 wa, 0.0 hi, 30.0 si, 0.0 st\r\nMiB Mem : 1987.6 total, 587.1 free, 472.5 used, 927.9 buff/cache\r\nMiB Swap: 0.0 total, 0.0 free, 0.0 used. 1333.6 avail Mem\r\n\r\n PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND\r\n2022714 root 10 -10 235600 39464", + "top - 22:28:03 up 5 days, 5:34, 1 user, load average: 0.03, 0.38, 0.55\r\nTasks: 157 total, 1 running, 156 sleeping, 0 stopped, 0 zombie\r\n%Cpu(s): 4.0 us, 5.8 sy, 0.0 ni, 60.2 id, 0.0 wa, 0.0 hi, 30.0 si, 0.0 st\r\nMiB Mem : 1987.6 total, 587.1 free, 472.5 used, 927.9 buff/cache\r\nMiB Swap: 0.0 total, 0.0 free, 0.0 used. 
1333.6 avail Mem\r\n\r\n PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND\r\n2022714 root 10 -10 235600 39464 12156 S 7.3 1.9 0:02.00 ovs-vswitchd\r\n 10 root 20 0 0 0 0 S 5.7 0.0 0:42.08 ksoftirqd/0\r\n2022555 root 20 0 1267928 51976 34572 S 5.0 2.6 0:02.49 antrea-agent\r\n 14079 root 20 0 1941628 72840 30968 S 2.3 3.6 164:48.40 kubelet", + "top - 22:19:06 up 5 days, 5:25, 1 user, load average: 1.48, 1.02, 0.75\r\nTasks: 147 total, 3 running, 144 sleeping, 0 stopped, 0 zombie\r\n%Cpu(s): 33.3 us, 19.1 sy, 0.0 ni, 11.1 id, 0.0 wa, 0.0 hi, 36.5 si, 0.0 st\r\nMiB Mem : 1987.6 total, 566.6 free, 480.7 used, 940.3 buff/cache\r\nMiB Swap: 0.0 total, 0.0 free, 0.0 used. 1326.7 avail Mem\r\n\r\n PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND\r\n2010190 root 10 -10 235704 46428 12156 R 98.0 2.3 1:40.03 ovs-vswitchd\r\n2009999 root 20 0 1341660 56128 34732 R 55.4 2.8 1:02.89 antrea-agent\r\n 10 root 20 0 0 0 0 S 7.3 0.0 0:40.94 ksoftirqd/0\r\n 14079 root 20 0 1941628 73520 31496 S 3.6 3.6 164:32.39 kubelet" + ] + } + }, + "metadata": { + "tags": [ + "antrea", + "sandbox", + "networking", + "troubleshoot" + ], + "cncfProjects": [ + "antrea" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/antrea-io/antrea/pull/2121", + "repo": "https://github.com/antrea-io/antrea", + "pr": "https://github.com/antrea-io/antrea/pull/2215" + }, + "reactions": 2, + "comments": 13, + "synthesizedBy": "regex", + "qualityScore": 68 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with antrea installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:38:41.408Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo-events/argo-events-1395-feat-redis-streams-as-event-source.json b/solutions/cncf-generated/argo-events/argo-events-1395-feat-redis-streams-as-event-source.json new file mode 100644 index 00000000..148921ec --- /dev/null +++ b/solutions/cncf-generated/argo-events/argo-events-1395-feat-redis-streams-as-event-source.json @@ -0,0 +1,84 @@ +{ + "version": "kc-mission-v1", + "name": "argo-events-1395-feat-redis-streams-as-event-source", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "argo-events: feat: Redis streams as event source", + "description": "Checklist:\n\n* [ ] My organization is added to [USERS.md](https://github.com/argoproj/argo-events/blob/master/USERS.md).\n\ncloses: #1369 \n\nTested with single and multiple streams on minikube cluster:\n```yaml\napiVersion: argoproj.io/v1alpha1\nkind: EventSource\nmetadata:\n name: redis-stream\nspec:\n redisStream:\n example:\n hostAddress: 192.168.0.106:6379\n db: 0\n streams:\n - FOO\n - BAR\n```\nWith pod creation as sensor:\n```yaml\napiVersion: argoproj.io/v1alpha1\nkind: Senso", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Checklist:\n\n* [ ] My organization is added to [USERS.md](https://github.com/argoproj/argo-events/blob/master/USERS.md).\n\ncloses: #1369 \n\nTested with single and multiple streams on minikube cluster:\n``" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\napiVersion: 
argoproj.io/v1alpha1\r\nkind: EventSource\r\nmetadata:\r\n name: redis-stream\r\nspec:\r\n redisStream:\r\n example:\r\n hostAddress: 192.168.0.106:6379\r\n db: 0\r\n streams:\r\n - FOO\r\n - BAR\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/argoproj/argo-events/pull/1744. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Checklist:\n\n* [x] My organization is added to [USERS.md](https://github.com/argoproj/argo-events/blob/master/USERS.md).\n\ncloses: #1369 \nprevious discussion: #1395 \n\nMessages from the stream are read using the Redis consumer group. The main reason for using consumer group is to resume from the last read upon pod restarts. A common consumer group (defaults to \"argo-events-cg\") is created (if not already exists) on all specified streams. When using consumer group, each read through a consumer group is a write operation, because Redis needs to update the last retrieved message id and the pending entries list(PEL) of that specific user in the consumer group. So it can only work with the master Redis instance and not replicas (https://redis.io/topics/streams-intro).\n\nRedis stream event source expects all the streams to be present on the Redis server. This event source only starts pulling messages from the streams when all of the specified streams exist on the Redis server. On the initial setup, the consumer group is created on all the specified streams to start reading from the latest message (not necessarily the beginning of the stream). On subsequent setups (the consumer group already exists on the streams) or during pod restarts, messages are pulled from the last unacknowledged message in the stream.\n\nThe consumer group is never deleted automatically. 
If you want a completely fresh setup again, you must delete the consumer group from the streams.\n\nTested with single and multiple", + "codeSnippets": [ + "apiVersion: argoproj.io/v1alpha1\r\nkind: EventSource\r\nmetadata:\r\n name: redis-stream\r\nspec:\r\n redisStream:\r\n example:\r\n hostAddress: 192.168.0.106:6379\r\n db: 0\r\n streams:\r\n - FOO\r\n - BAR", + "apiVersion: argoproj.io/v1alpha1\r\nkind: EventSource\r\nmetadata:\r\n name: redis-stream\r\nspec:\r\n redisStream:\r\n example:\r\n hostAddress: 192.168.0.106:6379\r\n db: 0\r\n streams:\r\n - FOO\r\n - BAR", + "apiVersion: argoproj.io/v1alpha1\r\nkind: Sensor\r\nmetadata:\r\n name: redis-stream-sensor\r\nspec:\r\n template:\r\n serviceAccountName: argo-events-sa\r\n dependencies:\r\n - name: payload\r\n eventSourceName: redis-stream\r\n eventName: example\r\n triggers:\r\n - template:\r\n name: payload\r\n k8s:\r\n group: \"\"\r\n version: v1\r\n resource: pods\r\n operation: create\r\n source:\r\n resource:\r\n apiVersion: v1\r\n kind: Pod\r\n metadata:\r\n generateName: payload-\r\n labels:\r\n app: payload\r\n spec:\r\n containers:\r\n - name: hello\r\n image: alpine\r\n command: [\"echo\"]\r\n args: [\"This is the message you sent me:\\n\", \"\"]\r\n restartPolicy: Never\r\n parameters:\r\n - src:\r\n dependencyName: payload\r\n dataKey: body.message\r\n dest: spec.containers.0.args.1", + "➜ kubectl -n argo-events logs -f payload-fxlfp\r\nThis is the message you sent me:\r\n {\"stream\":\"BAR\",\"message_id\":\"1635165767378-0\",\"values\":{\"BARId\":\"BAR-1\",\"second-key\":\"second-val\"}}\r\n\r\n➜ kubectl -n argo-events logs -f payload-xkfzs\r\nThis is the message you sent me:\r\n {\"stream\":\"FOO\",\"message_id\":\"1635165767376-0\",\"values\":{\"FOOId\":\"FOO-1\",\"second-key\":\"second-val\"}}", + "➜ kubectl -n argo-events logs -f redis-stream-eventsource-nnr9k-566d47dd54-lfvc4\r\n<--- snipped --->\r\n{\"level\":\"info\",\"ts\":1635165767.3778944,\"logger\":\"argo-events.eventsource\",\"caller\":\"redisstream/start.go:161\",\"msg\":\"received a message\",\"eventSourceName\":\"redis-stream\",\"eventSourceType\":\"redisStream\",\"eventName\":\"example\",\"stream\":\"FOO\",\"message_id\":\"1635165767376-0\"}\r\n{\"level\":\"info\",\"ts\":1635165767.37798,\"logger\":\"argo-events.eventsource\",\"caller\":\"redisstream/start.go:172\",\"msg\":\"dispatching the event on the data channel...\",\"eventSourceName\":\"redis-stream\",\"eventSourceType\":\"redisStream\",\"eventName\":\"example\",\"stream\":\"FOO\"}\r\n{\"level\":\"info\",\"ts\":1635165767.382242,\"logger\":\"argo-events.eventsource\",\"caller\":\"eventsources/eventing.go:427\",\"msg\":\"succeeded to publish an event\",\"eventSourceName\":\"redis-stream\",\"eventName\":\"example\",\"eventSourceType\":\"redisStream\",\"eventID\":\"65383936383734332d666337662d343762662d623035382d373134313863326565373865\"}\r\n{\"level\":\"info\",\"ts\":1635165767.3863325,\"logger\":\"argo-events.eventsource\",\"caller\":\"redisstream/start.go:161\",\"msg\":\"received a message\",\"eventSourceName\":\"redis-stream\",\"eventSourceType\":\"redisStream\",\"eventName\":\"example\",\"stream\":\"BAR\",\"message_id\":\"1635165767378-0\"}\r\n{\"level\":\"info\",\"ts\":1635165767.3864572,\"logger\":\"argo-events.eventsource\",\"caller\":\"redisstream/start.go:172\",\"msg\":\"dispatching the event on the data 
channel...\",\"eventSourceName\":\"redis-stream\",\"eventSourceType\":\"redisStream\",\"eventName\":\"example\",\"stream\":\"BAR\"}\r\n{\"level\":\"info\",\"ts\":1635165767.3950868,\"logger\":\"argo-events.eventsource\",\"caller\":\"eventsources/eventing.go:427\",\"msg\":\"succeeded to publish an event\",\"eventSourceName\":\"redis-stream\",\"eventName\":\"example\",\"eventSourceType\":\"redisStream\",\"eventID\":\"62353137353933352d363135392d346662642d396430622d393532376631396531353636\"}" + ] + } + }, + "metadata": { + "tags": [ + "argo-events", + "graduated", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "argo-events" + ], + "targetResourceKinds": [ + "Pod", + "Service", + "Serviceaccount" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/argoproj/argo-events/pull/1395", + "repo": "https://github.com/argoproj/argo-events", + "pr": "https://github.com/argoproj/argo-events/pull/1744" + }, + "reactions": 3, + "comments": 7, + "synthesizedBy": "regex", + "qualityScore": 71 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with argo-events installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:26:51.426Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo-rollouts/argo-rollouts-1001-feat-implement-rollout-status-command-fixes-596.json b/solutions/cncf-generated/argo-rollouts/argo-rollouts-1001-feat-implement-rollout-status-command-fixes-596.json new file mode 100644 index 00000000..f34d9114 --- /dev/null +++ b/solutions/cncf-generated/argo-rollouts/argo-rollouts-1001-feat-implement-rollout-status-command-fixes-596.json @@ -0,0 +1,75 @@ +{ + "version": "kc-mission-v1", + "name": "argo-rollouts-1001-feat-implement-rollout-status-command-fixes-596", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "argo-rollouts: feat: Implement rollout status command. Fixes #596", + "description": "This replicates the `kubectl rollout status deployment [--watch] ` command, but for Rollouts.\n\nThis is implemented as:\n\n```\n# Returns Rollout status: Healthy, Progressing, Degraded, etc\nkubectl argo rollouts status --watch=false \n\n# Waits until status is Healthy or Degraded (default behaviour without flags)\nkubectl argo rollouts status --watch \n```\n\nThe flags and defaults for them replicate the `kubectl rollout status deployment ` command. As in it, `--watch` is default a", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This replicates the `kubectl rollout status deployment [--watch] ` command, but for Rollouts.\n\nThis is implemented as:\n\n```\n# Returns Rollout status: Healthy, Progressing, Degraded, etc\nkubectl " + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n# Returns Rollout status: Healthy, Progressing, Degraded, etc\r\nkubectl argo rollouts status --watch=false \r\n\r\n# Waits until status is Healthy or Degraded (default behaviour without flags)\r\nkubectl argo rollouts status --watch \n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "> The flags and defaults for them replicate the kubectl rollout status deployment command. As in it, --watch is default and the default timeout is 5 minutes.\n\nIt seems that kubectl defaults to at timeout of 0s (never). Instead of defaulting to 5m, can we replicate kubectl behavior?\n\n```\n$ kubectl rollout status --help\n...\n --timeout=0s: The length of time to wait before ending watch, zero means never. Any other values should contain a\ncorresponding time unit (e.g. 1s, 2m, 3h).\n```", + "codeSnippets": [ + "# Returns Rollout status: Healthy, Progressing, Degraded, etc\r\nkubectl argo rollouts status --watch=false \r\n\r\n# Waits until status is Healthy or Degraded (default behaviour without flags)\r\nkubectl argo rollouts status --watch ", + "# Returns Rollout status: Healthy, Progressing, Degraded, etc\r\nkubectl argo rollouts status --watch=false \r\n\r\n# Waits until status is Healthy or Degraded (default behaviour without flags)\r\nkubectl argo rollouts status --watch ", + "| [Impacted Files](https://codecov.io/gh/argoproj/argo-rollouts/pull/1001?src=pr&el=tree) | Coverage Δ | |\n|---|---|---|\n| [pkg/kubectl-argo-rollouts/cmd/status/status.go](https://codecov.io/gh/argoproj/argo-rollouts/pull/1001/diff?src=pr&el=tree#diff-cGtnL2t1YmVjdGwtYXJnby1yb2xsb3V0cy9jbWQvc3RhdHVzL3N0YXR1cy5nbw==) | `93.44% <93.44%> (ø)` | |\n| [pkg/kubectl-argo-rollouts/cmd/cmd.go](https://codecov.io/gh/argoproj/argo-rollouts/pull/1001/diff?src=pr&el=tree#diff-cGtnL2t1YmVjdGwtYXJnby1yb2xsb3V0cy9jbWQvY21kLmdv) | `100.00% <100.00%> (ø)` | |\n| [utils/evaluate/evaluate.go](https://codecov.io/gh/argoproj/argo-rollouts/pull/1001/diff?src=pr&el=tree#diff-dXRpbHMvZXZhbHVhdGUvZXZhbHVhdGUuZ28=) | `60.74% <0.00%> (-0.73%)` | :arrow_down: |\n| [pkg/kubectl-argo-rollouts/cmd/create/create.go](https://codecov.io/gh/argoproj/argo-rollouts/pull/1001/diff?src=pr&el=tree#diff-cGtnL2t1YmVjdGwtYXJnby1yb2xsb3V0cy9jbWQvY3JlYXRlL2NyZWF0ZS5nbw==) | `65.42% <0.00%> (-0.60%)` | :arrow_down: |\n| [metricproviders/wavefront/wavefront.go](https://codecov.io/gh/argoproj/argo-rollouts/pull/1001/diff?src=pr&el=tree#diff-bWV0cmljcHJvdmlkZXJzL3dhdmVmcm9udC93YXZlZnJvbnQuZ28=) | `95.45% <0.00%> (-0.06%)` | :arrow_down: |\n| [analysis/analysis.go](https://codecov.io/gh/argoproj/argo-rollouts/pull/1001/diff?src=pr&el=tree#diff-YW5hbHlzaXMvYW5hbHlzaXMuZ28=) | `83.80% <0.00%> (+0.09%)` | :arrow_up: |\n| [.../apis/rollouts/validation/validation\\_references.go](https://codecov.io/gh/argoproj/argo-rollouts/pull/1001/diff?src=pr&el=tree#diff-cGtnL2FwaXMvcm9sbG91dHMvdmFsaWRhdGlvbi92YWxpZGF0aW9uX3JlZmVyZW5jZXMuZ28=) | `74.28% <0.00%> (+1.01%)` | :arrow_up: |\n| [rollout/service.go](https://codecov.io/gh/argoproj/argo-rollouts/pull/1001/diff?src=pr&el=tree#diff-cm9sbG91dC9zZXJ2aWNlLmdv) | `78.94% <0.00%> (+4.66%)` | :arrow_up: |\n\n------\n\n[Continue to review full report at Codecov](https://codecov.io/gh/argoproj/argo-rollouts/pull/1001?src=pr&el=continue).\n> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)\n> `Δ = absolute (impact)`, `ø = not affected`, `? = missing data`\n> Powered by [Codecov](https://codecov.io/gh/argoproj/argo-rollouts/pull/1001?src=pr&el=footer). Last update [dff1f22...3f754e8](https://codecov.io/gh/argoproj/argo-rollouts/pull/1001?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).\n\nIs there anything preventing this from being merged? 
This would be huge for my organization's CD pipeline\n> The flags and defaults for them replicate the kubectl rollout status deployment command. As in it, --watch is default and the default timeout is 5 minutes.\r\n\r\nIt seems that kubectl defaults to at timeout of 0s (never). Instead of defaulting to 5m, can we replicate kubectl behavior?" + ] + } + }, + "metadata": { + "tags": [ + "argo-rollouts", + "graduated", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "argo-rollouts" + ], + "targetResourceKinds": [ + "Deployment" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/argoproj/argo-rollouts/pull/1001", + "repo": "https://github.com/argoproj/argo-rollouts" + }, + "reactions": 4, + "comments": 8, + "synthesizedBy": "regex", + "qualityScore": 67 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with argo-rollouts installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:27:05.785Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo-workflows/argo-workflows-12722-feat-allow-lastretry-variables-in-expressions.json b/solutions/cncf-generated/argo-workflows/argo-workflows-12722-feat-allow-lastretry-variables-in-expressions.json new file mode 100644 index 00000000..ad42ae01 --- /dev/null +++ b/solutions/cncf-generated/argo-workflows/argo-workflows-12722-feat-allow-lastretry-variables-in-expressions.json @@ -0,0 +1,81 @@ +{ + "version": "kc-mission-v1", + "name": "argo-workflows-12722-feat-allow-lastretry-variables-in-expressions", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "argo-workflows: feat: allow lastRetry variables in expressions", + "description": "### Motivation\n\n### Modifications\n\n### Verification\n\n```yaml\napiVersion: argoproj.io/v1alpha1\nkind: Workflow\nmetadata:\n name: pod-spec-patch\nspec:\n entrypoint: whalesay\n templates:\n - name: whalesay\n retryStrategy:\n limit: 5\n retryPolicy: Always\n podSpecPatch: '{\"containers\":[{\"name\":\"main\", \"resources\":{\"requests\":{\"memory\": \"{{=(sprig.int(lastRetry.exitCode)==1 ? int(retries) : 1)* 10}}Mi\" }}}]}'\n container:\n image: python:alpine3.6\n command: [\"python\", -c]\n ", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "### Motivation\n\n### Modifications\n\n### Verification\n\n```yaml\napiVersion: argoproj.io/v1alpha1\nkind: Workflow\nmetadata:\n name: pod-spec-patch\nspec:\n entrypoint: whalesay\n templates:\n - name: whales" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\napiVersion: argoproj.io/v1alpha1\r\nkind: Workflow\r\nmetadata:\r\n name: pod-spec-patch\r\nspec:\r\n entrypoint: whalesay\r\n templates:\r\n - name: whalesay\r\n retryStrategy:\r\n limit: 5\r\n retryPolicy: Always\r\n podSpecPatch: '{\"containers\":[{\"name\":\"main\", \"resources\":{\"requests\":{\"memory\": \"{{=(sprig.int(lastRetry.exitCode)==1 ? 
int(retries) : 1)* 10}}Mi\" }}}]}'\r\n container:\r\n image: python:alpine3.6\r\n command: [\"python\", -c]\r\n args: [\"import sys; sys.exit(1)\"]\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/argoproj/argo-workflows/pull/14450. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "### Motivation\n\nThis PR resurrects the older [PR](https://github.com/argoproj/argo-workflows/pull/12722) by @eduardodbr to support last retry variable in expressions. \n\n### Modifications \nThis adds a generic function to replace expressions. My change differs somewhat to @eduardodbr's. I didn't agree with his implementation so I changed it. The actual injection is pretty much exactly how retries get injected. \n\n### Verification\nVerified with the test.\n\n### Documentation\nModified the docs to add documentation on the `lastRetry` variable.", + "codeSnippets": [ + "apiVersion: argoproj.io/v1alpha1\r\nkind: Workflow\r\nmetadata:\r\n name: pod-spec-patch\r\nspec:\r\n entrypoint: whalesay\r\n templates:\r\n - name: whalesay\r\n retryStrategy:\r\n limit: 5\r\n retryPolicy: Always\r\n podSpecPatch: '{\"containers\":[{\"name\":\"main\", \"resources\":{\"requests\":{\"memory\": \"{{=(sprig.int(lastRetry.exitCode)==1 ? int(retries) : 1)* 10}}Mi\" }}}]}'\r\n container:\r\n image: python:alpine3.6\r\n command: [\"python\", -c]\r\n args: [\"import sys; sys.exit(1)\"]", + "apiVersion: argoproj.io/v1alpha1\r\nkind: Workflow\r\nmetadata:\r\n name: pod-spec-patch\r\nspec:\r\n entrypoint: whalesay\r\n templates:\r\n - name: whalesay\r\n retryStrategy:\r\n limit: 5\r\n retryPolicy: Always\r\n podSpecPatch: '{\"containers\":[{\"name\":\"main\", \"resources\":{\"requests\":{\"memory\": \"{{=(sprig.int(lastRetry.exitCode)==1 ? int(retries) : 1)* 10}}Mi\" }}}]}'\r\n container:\r\n image: python:alpine3.6\r\n command: [\"python\", -c]\r\n args: [\"import sys; sys.exit(1)\"]" + ] + } + }, + "metadata": { + "tags": [ + "argo-workflows", + "graduated", + "app-definition", + "troubleshoot", + "area-templating", + "area-retrystrategy" + ], + "cncfProjects": [ + "argo-workflows" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/argoproj/argo-workflows/pull/12722", + "repo": "https://github.com/argoproj/argo-workflows", + "pr": "https://github.com/argoproj/argo-workflows/pull/14450" + }, + "reactions": 4, + "comments": 4, + "synthesizedBy": "regex", + "qualityScore": 65 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with argo-workflows installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:26:40.896Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo-workflows/argo-workflows-1320-wip-add-task-names-to-pod-names.json b/solutions/cncf-generated/argo-workflows/argo-workflows-1320-wip-add-task-names-to-pod-names.json new file mode 100644 index 00000000..c8151ab2 --- /dev/null +++ b/solutions/cncf-generated/argo-workflows/argo-workflows-1320-wip-add-task-names-to-pod-names.json @@ -0,0 +1,76 @@ +{ + "version": "kc-mission-v1", + "name": "argo-workflows-1320-wip-add-task-names-to-pod-names", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "argo-workflows: WIP: Add Task names to Pod names", + "description": "* This proposal adds the name of each task to the name of the pod it is\n associated with. This makes it easier to keep track of which pod is\n performing which task.\n\nStill a work in progress - unit tests are failing, and there's probably a better way to deal with parentheses.\n\nThis would close https://github.com/argoproj/argo/issues/1319 if it is accepted.\n\nLet me know what you think :)", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "* This proposal adds the name of each task to the name of the pod it is\n associated with. This makes it easier to keep track of which pod is\n performing which task.\n\nStill a work in progress - unit " + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n$ kubectl apply -f steps.yaml\r\nNAME READY STATUS RESTARTS AGE\r\nsteps-1781811180 0/2 Completed 0 7s\r\nsteps-1832144037 0/2 Completed 0 7s\r\nsteps-215716891 0/2 Completed 0 11s\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "Here's a before/after example of this feature:\n\n**Before**\n```\n$ kubectl apply -f steps.yaml\nNAME READY STATUS RESTARTS AGE\nsteps-1781811180 0/2 Completed 0 7s\nsteps-1832144037 0/2 Completed 0 7s\nsteps-215716891 0/2 Completed 0 11s\n```\n\n**After**\n```\n$ kubectl apply -f steps.yaml\nNAME READY STATUS RESTARTS AGE\nsteps-0.hello1 0/2 Completed 0 12s\nsteps-1.hello2a 0/2 Completed 0 9s\nsteps-1.hello2b 0/2 Completed 0 9s\n```\n\nOne notable side effect of this change is that the pods are always listed in step-order.", + "codeSnippets": [ + "$ kubectl apply -f steps.yaml\r\nNAME READY STATUS RESTARTS AGE\r\nsteps-1781811180 0/2 Completed 0 7s\r\nsteps-1832144037 0/2 Completed 0 7s\r\nsteps-215716891 0/2 Completed 0 11s", + "$ kubectl apply -f steps.yaml\r\nNAME READY STATUS RESTARTS AGE\r\nsteps-1781811180 0/2 Completed 0 7s\r\nsteps-1832144037 0/2 Completed 0 7s\r\nsteps-215716891 0/2 Completed 0 11s", + "$ kubectl apply -f steps.yaml\r\nNAME READY STATUS RESTARTS AGE\r\nsteps-0.hello1 0/2 Completed 0 12s\r\nsteps-1.hello2a 0/2 Completed 0 9s\r\nsteps-1.hello2b 0/2 Completed 0 9s", + "golangci-lint run --config golangci.yml \r\nlevel=error msg=\"[runner] 0/12 linters finished: deadline exceeded\" \r\nlevel=error msg=\"Deadline exceeded: try increase it by passing --deadline option\" \r\nMakefile:133: recipe for target 'lint' failed \r\nmake: *** [lint] Error 4" + ] + } + }, + "metadata": { + "tags": [ + "argo-workflows", + "graduated", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "argo-workflows" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/argoproj/argo-workflows/pull/1320", + "repo": "https://github.com/argoproj/argo-workflows" + }, + "reactions": 9, + "comments": 27, + "synthesizedBy": "regex", + "qualityScore": 64 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with argo-workflows installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:26:18.275Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo-workflows/argo-workflows-14766-fix-cluster-workflow-template-store-is-not-initialized-in-n.json b/solutions/cncf-generated/argo-workflows/argo-workflows-14766-fix-cluster-workflow-template-store-is-not-initialized-in-n.json new file mode 100644 index 00000000..f218d916 --- /dev/null +++ b/solutions/cncf-generated/argo-workflows/argo-workflows-14766-fix-cluster-workflow-template-store-is-not-initialized-in-n.json @@ -0,0 +1,80 @@ +{ + "version": "kc-mission-v1", + "name": "argo-workflows-14766-fix-cluster-workflow-template-store-is-not-initialized-in-n", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "argo-workflows: fix: cluster workflow template store is not initialized in namespace mode. Fixes #14763", + "description": "### Motivation\n\nSubmitting a workflow via UI, either from workflow template or via cron workflow, in namespace mode is failing because the informer is not initialized. 
\n\n### Modifications\n\nThe template store is always initialized to NullClusterWorkflowTemplate when RBAC is not enabled.\n\n### Verification\n\nRun with [namespace-install.yaml](https://github.com/argoproj/argo-workflows/releases/download/v3.7.0/namespace-install.yaml).\n\nSubmited via:\n\nkubectl create\nArgo submit\nCron submit via UI\nWorkf", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "### Motivation\n\nSubmitting a workflow via UI, either from workflow template or via cron workflow, in namespace mode is failing because the informer is not initialized. \n\n### Modifications\n\nThe templat" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n2025/08/18 19:01:05 http: panic serving [::1]:53376: runtime error: invalid memory address or nil pointer dereference\r\ngoroutine 14 [running]:\r\nnet/http.(*conn).serve.func1()\r\n /Users/uqzb/go/pkg/mod/golang.org/toolchain@v0.0.1-go1.25.0.darwin-arm64/src/net/http/server.go:1943 +0xb4\r\npanic({0x104a2dae0?, 0x1067493c0?})\r\n /Users/uqzb/go/pkg/mod/golang.org/toolchain@v0.0.1-go1.25.0.darwin-arm64/src/runtime/panic.go:783 +0x120\r\ngithub.com/argoproj/argo-workflows/v3/server/workflow.(*w\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/argoproj/argo-workflows/pull/14826. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Cherry-picked fix: cluster workflow template store is not initialized in namespace mode. 
Fixes #14763 from #14766", + "codeSnippets": [ + "2025/08/18 19:01:05 http: panic serving [::1]:53376: runtime error: invalid memory address or nil pointer dereference\r\ngoroutine 14 [running]:\r\nnet/http.(*conn).serve.func1()\r\n /Users/uqzb/go/pkg/mod/golang.org/toolchain@v0.0.1-go1.25.0.darwin-arm64/src/net/http/server.go:1943 +0xb4\r\npanic({0x104a2dae0?, 0x1067493c0?})\r\n /Users/uqzb/go/pkg/mod/golang.org/toolchain@v0.0.1-go1.25.0.darwin-arm64/src/runtime/panic.go:783 +0x120\r\ngithub.com/argoproj/argo-workflows/v3/server/workflow.(*w", + "2025/08/18 19:01:05 http: panic serving [::1]:53376: runtime error: invalid memory address or nil pointer dereference\r\ngoroutine 14 [running]:\r\nnet/http.(*conn).serve.func1()\r\n /Users/uqzb/go/pkg/mod/golang.org/toolchain@v0.0.1-go1.25.0.darwin-arm64/src/net/http/server.go:1943 +0xb4\r\npanic({0x104a2dae0?, 0x1067493c0?})\r\n /Users/uqzb/go/pkg/mod/golang.org/toolchain@v0.0.1-go1.25.0.darwin-arm64/src/runtime/panic.go:783 +0x120\r\ngithub.com/argoproj/argo-workflows/v3/server/workflow.(*workflowServer).SubmitWorkflow(0x14000830580, {0x104ee8598, 0x1400099c550}, 0x14000518ae0)\r\n /Users/uqzb/go/pkg/mod/github.com/eduardodbr/argo-workflows/v3@v3.0.0-20250815172450-f2599c34015f/server/workflow/workflow_server.go:804 +0x694\r\ngithub.com/argoproj/argo-workflows/v3/pkg/apiclient.(*argoKubeWorkflowServiceClient).SubmitWorkflow(0x23?, {0x104ee8598?, 0x1400099c550?}, 0x1040e51ac?, {0x14000893240?, 0xb?, 0x14000069788?})\r\n /Users/uqzb/go/pkg/mod/github.com/eduardodbr/argo-workflows/v3@v3.0.0-20250815172450-f2599c34015f/pkg/apiclient/argo-kube-workflow-service-client.go:122 +0x2c\r\ngithub.com/argoproj/argo-workflows/v3/pkg/apiclient.(*errorTranslatingWorkflowServiceClient).SubmitWorkflow(0x104eb2378?, {0x104ee8598?, 0x1400099c550?}, 0x1046e35f8?, {0x140000698f8?, 0x2?, 0x2?})\r\n /Users/uqzb/go/pkg/mod/github.com/eduardodbr/argo-workflows/v3@v3.0.0-20250815172450-f2599c34015f/pkg/apiclient/error-translating-workflow-service-client.go:100 +0x38\r\nmain.(*Handler).processAlert(0x1400082ce70, {0x104ee8598, 0x1400099c550}, 0x14000830800, {{0x140008931c0, 0x6}, 0x14000b89aa0, 0x14000b89ad0, {0x2c10513c, 0xed2f600ba, ...}, ...})", + "\r\n\r\n
\r\n Backtrace" + ] + } + }, + "metadata": { + "tags": [ + "argo-workflows", + "graduated", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "argo-workflows" + ], + "targetResourceKinds": [ + "Namespace" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/argoproj/argo-workflows/pull/14766", + "repo": "https://github.com/argoproj/argo-workflows", + "pr": "https://github.com/argoproj/argo-workflows/pull/14826" + }, + "reactions": 8, + "comments": 8, + "synthesizedBy": "regex", + "qualityScore": 66 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with argo-workflows installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:26:20.126Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo-workflows/argo-workflows-15107-fix-avoid-resetting-resourceversion-for-watch-fixes-15106.json b/solutions/cncf-generated/argo-workflows/argo-workflows-15107-fix-avoid-resetting-resourceversion-for-watch-fixes-15106.json new file mode 100644 index 00000000..e77854d2 --- /dev/null +++ b/solutions/cncf-generated/argo-workflows/argo-workflows-15107-fix-avoid-resetting-resourceversion-for-watch-fixes-15106.json @@ -0,0 +1,78 @@ +{ + "version": "kc-mission-v1", + "name": "argo-workflows-15107-fix-avoid-resetting-resourceversion-for-watch-fixes-15106", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "argo-workflows: fix: Avoid resetting resourceVersion for watch. Fixes #15106", + "description": "Fixes Avoid resetting resourceVersion for watch. Fixes #15106\n\n### Modifications\n\nCheck resourceVersion before resetting to avoid setting to \"\" for watch connection\n\n### Verification\n\nNo repeated error in #15106 after fix.", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Fixes Avoid resetting resourceVersion for watch. Fixes #15106\n\n### Modifications\n\nCheck resourceVersion before resetting to avoid setting to \"\" for watch connection\n\n### Verification\n\nNo repeated erro" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nAuthor: jackcui \r\nDate: Fri Dec 5 11:25:15 2025 +0800\r\n\r\n fix: Avoid resetting resourceVersion for watch. Fixes #15106\r\n \r\n Signed-off-by: Jack Cui \n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/argoproj/argo-workflows/pull/15090. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "### Motivation\n\nMultiple issues have been created because of unexpected workflow behavior:\n\n#13986\n#14833 \n#12352\n#14780\n\nIt appears that many of these issues occur because the controller is processing an outdated version of the workflow. 
The exact cause of these stale reads is still unknown, but there is some suspicion that it may be related to the informer write-back mechanism, which is being disabled by default in #15079.\nThis PR ensures that stale workflow versions are not reconciled by keeping track of the last processed resource version for each workflow in a last-seen-version annotation. A workflow is only processed when its annotation matches the expected version; otherwise, it is re-queued. The annotation stores the workflow’s resource version, though any unique value would work. I just thought using the RV was enough.\n\n### Modifications\n\n- Introduce a new `last-seen-version` annotation, updated with the current resource version on every `Update()` event.\n- Store the last-seen-version of each workflow in memory. When a workflow is processed, it proceeds only if the annotation matches the stored version.\n- If no stored version exists (e.g., after a controller restart), the workflow is always processed to allow normal recovery.\n- The in-memory entry is removed as soon as a `Delete` event is received or when the workflow completes.\n\n### Verification\n\nExecuted workflows with success. \n\n### Documentation\n\n## Summary by CodeRabbit\n\n* **New Features**\n * Workflow version t", + "codeSnippets": [ + "Author: jackcui \r\nDate: Fri Dec 5 11:25:15 2025 +0800\r\n\r\n fix: Avoid resetting resourceVersion for watch. Fixes #15106\r\n \r\n Signed-off-by: Jack Cui ", + "Author: jackcui \r\nDate: Fri Dec 5 11:25:15 2025 +0800\r\n\r\n fix: Avoid resetting resourceVersion for watch. Fixes #15106\r\n \r\n Signed-off-by: Jack Cui " + ] + } + }, + "metadata": { + "tags": [ + "argo-workflows", + "graduated", + "app-definition", + "troubleshoot", + "cherry-pick-3-7" + ], + "cncfProjects": [ + "argo-workflows" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/argoproj/argo-workflows/pull/15107", + "repo": "https://github.com/argoproj/argo-workflows", + "pr": "https://github.com/argoproj/argo-workflows/pull/15090" + }, + "reactions": 7, + "comments": 9, + "synthesizedBy": "regex", + "qualityScore": 64 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with argo-workflows installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:26:23.117Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo-workflows/argo-workflows-1716-feat-added-onexit-handlers-to-arbitrary-templates.json b/solutions/cncf-generated/argo-workflows/argo-workflows-1716-feat-added-onexit-handlers-to-arbitrary-templates.json new file mode 100644 index 00000000..f491522e --- /dev/null +++ b/solutions/cncf-generated/argo-workflows/argo-workflows-1716-feat-added-onexit-handlers-to-arbitrary-templates.json @@ -0,0 +1,89 @@ +{ + "version": "kc-mission-v1", + "name": "argo-workflows-1716-feat-added-onexit-handlers-to-arbitrary-templates", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "argo-workflows: feat: Added onExit handlers to arbitrary templates", + "description": "Fixes: https://github.com/argoproj/argo/issues/1688. 
Added onExit handlers to any template.\n\nExample:\n```yaml\napiVersion: argoproj.io/v1alpha1\nkind: Workflow\nmetadata:\n generateName: suspend-template-\nspec:\n entrypoint: suspend\n templates:\n - name: suspend\n steps:\n - - name: steps1\n template: stepsTempalte\n - - name: steps2\n template: stepsTempalte\n\n - name: stepsTempalte\n onExit: exitContainer\n steps:\n - - name: leafA\n template: whalesay\n - - name:", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Fixes: https://github.com/argoproj/argo/issues/1688. Added onExit handlers to any template.\n\nExample:\n```yaml\napiVersion: argoproj.io/v1alpha1\nkind: Workflow\nmetadata:\n generateName: suspend-template" + }, + { + "title": "`container` (`suspend`, `script`, and `resource` work similarly):", + "description": "`container` (`suspend`, `script`, and `resource` work similarly):" + }, + { + "title": "`container` in `steps` (works similarly as in `dag` as well as nested `steps`...", + "description": "`container` in `steps` (works similarly as in `dag` as well as nested `steps` and `dag`s): See example file: https://github.com/argoproj/argo/blob/538415c9902d8a04ccdee2f563e69a37c06bbc24/examples/template-on-exit.yaml" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\napiVersion: argoproj.io/v1alpha1\r\nkind: Workflow\r\nmetadata:\r\n generateName: suspend-template-\r\nspec:\r\n entrypoint: suspend\r\n templates:\r\n - name: suspend\r\n steps:\r\n - - name: steps1\r\n template: stepsTempalte\r\n - - name: steps2\r\n template: stepsTempalte\r\n\r\n - name: stepsTempalte\r\n onExit: exitContainer\r\n steps:\r\n - - name: leafA\r\n template: whalesay\r\n - - name: leafB\r\n template: whalesay\r\n\r\n - name: whalesay\r\n container:\r\n image: dock\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Implementation is now done and should be ready for review. `onExit` handlers work for all template types, inducing leaf nodes. Some examples:\n\n1. `container` (`suspend`, `script`, and `resource` work similarly):\n```\napiVersion: argoproj.io/v1alpha1\nkind: Workflow\nmetadata:\n generateName: suspend-template-\nspec:\n entrypoint: whalesay\n templates:\n - name: whalesay\n onExit: exitContainer\n suspend:\n duration: 5\n\n - name: exitContainer\n container:\n image: docker/whalesay\n command: [cowsay]\n args: [\"goodbye world\"]\n\n```\nOutput:\n```\nName: suspend-template-szt9p\nNamespace: argo\nServiceAccount: default\nStatus: Succeeded\nCreated: Wed Nov 06 08:32:20 -0800 (6 minutes ago)\nStarted: Wed Nov 06 08:32:20 -0800 (6 minutes ago)\nFinished: Wed Nov 06 08:32:31 -0800 (5 minutes ago)\nDuration: 11 seconds\n\nSTEP PODNAME DURATION MESSAGE\n ✔ suspend-template-szt9p (whalesay) suspend-template-szt9p 5s\n\n ✔ suspend-template-szt9p.onExit (exitContainer) suspend-template-szt9p-633898630 4s\n```\n\n2. 
`container` in `steps` (works similarly as in `dag` as well as nested `steps` and `dag`s): See example file: https://github.com/argoproj/argo/blob/538415c9902d8a04ccdee2f563e69a37c06bbc24/examples/template-on-exit.yaml\n\nOutput:\n```\nName: container-on-exit-m6wq5\nNamespace:", + "codeSnippets": [ + "apiVersion: argoproj.io/v1alpha1\r\nkind: Workflow\r\nmetadata:\r\n generateName: suspend-template-\r\nspec:\r\n entrypoint: suspend\r\n templates:\r\n - name: suspend\r\n steps:\r\n - - name: steps1\r\n template: stepsTempalte\r\n - - name: steps2\r\n template: stepsTempalte\r\n\r\n - name: stepsTempalte\r\n onExit: exitContainer\r\n steps:\r\n - - name: leafA\r\n template: whalesay\r\n - - name: leafB\r\n template: whalesay\r\n\r\n - name: whalesay\r\n container:\r\n image: dock", + "apiVersion: argoproj.io/v1alpha1\r\nkind: Workflow\r\nmetadata:\r\n generateName: suspend-template-\r\nspec:\r\n entrypoint: suspend\r\n templates:\r\n - name: suspend\r\n steps:\r\n - - name: steps1\r\n template: stepsTempalte\r\n - - name: steps2\r\n template: stepsTempalte\r\n\r\n - name: stepsTempalte\r\n onExit: exitContainer\r\n steps:\r\n - - name: leafA\r\n template: whalesay\r\n - - name: leafB\r\n template: whalesay\r\n\r\n - name: whalesay\r\n container:\r\n image: docker/whalesay\r\n command: [cowsay]\r\n args: [\"hello world\"]\r\n\r\n - name: exitContainer\r\n container:\r\n image: docker/whalesay\r\n command: [cowsay]\r\n args: [\"goodbye world\"]", + "Name: suspend-template-nxslc\r\nNamespace: argo\r\nServiceAccount: default\r\nStatus: Succeeded\r\nCreated: Tue Oct 29 09:08:18 -0700 (37 seconds ago)\r\nStarted: Tue Oct 29 09:08:18 -0700 (37 seconds ago)\r\nFinished: Tue Oct 29 09:08:55 -0700 (now)\r\nDuration: 37 seconds\r\n\r\nSTEP PODNAME DURATION MESSAGE\r\n ✔ suspend-template-nxslc (suspend)\r\n ├---✔ steps1 (stepsTempalte)\r\n | ├---✔ leafA (whalesay) suspend-template-nxslc-4023060684 4s\r\n | ├---✔ leafB (whalesay) suspend-template-nxslc-1080227016 4s\r\n | └-✔ onExit (exitContainer) suspend-template-nxslc-3814888236 5s\r\n └---✔ steps2 (stepsTempalte)\r\n ├---✔ leafA (whalesay) suspend-template-nxslc-3324890710 4s\r\n ├---✔ leafB (whalesay) suspend-template-nxslc-4274891518 4s\r\n └-✔ onExit (exitContainer) suspend-template-nxslc-2655944446 5s", + "apiVersion: argoproj.io/v1alpha1\r\nkind: Workflow\r\nmetadata:\r\n generateName: suspend-template-\r\nspec:\r\n entrypoint: whalesay\r\n templates:\r\n - name: whalesay\r\n onExit: exitContainer\r\n suspend:\r\n duration: 5\r\n\r\n - name: exitContainer\r\n container:\r\n image: docker/whalesay\r\n command: [cowsay]\r\n args: [\"goodbye world\"]", + "Name: suspend-template-szt9p\r\nNamespace: argo\r\nServiceAccount: default\r\nStatus: Succeeded\r\nCreated: Wed Nov 06 08:32:20 -0800 (6 minutes ago)\r\nStarted: Wed Nov 06 08:32:20 -0800 (6 minutes ago)\r\nFinished: Wed Nov 06 08:32:31 -0800 (5 minutes ago)\r\nDuration: 11 seconds\r\n\r\nSTEP PODNAME DURATION MESSAGE\r\n ✔ suspend-template-szt9p (whalesay) suspend-template-szt9p 5s\r\n\r\n ✔ suspend-template-szt9p.onExit (exitContainer) suspend-template-szt9p-633898630 4s" + ] + } + }, + "metadata": { + "tags": [ + "argo-workflows", + "graduated", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "argo-workflows" + ], + "targetResourceKinds": [ + "Pod", + "Service", + "Namespace", + "Serviceaccount", + "Node" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": 
"https://github.com/argoproj/argo-workflows/pull/1716", + "repo": "https://github.com/argoproj/argo-workflows" + }, + "reactions": 5, + "comments": 20, + "synthesizedBy": "regex", + "qualityScore": 70 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with argo-workflows installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:26:33.618Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo-workflows/argo-workflows-1758-feat-support-for-scheduled-workflows-with-cronworkflow-crd.json b/solutions/cncf-generated/argo-workflows/argo-workflows-1758-feat-support-for-scheduled-workflows-with-cronworkflow-crd.json new file mode 100644 index 00000000..05108e53 --- /dev/null +++ b/solutions/cncf-generated/argo-workflows/argo-workflows-1758-feat-support-for-scheduled-workflows-with-cronworkflow-crd.json @@ -0,0 +1,79 @@ +{ + "version": "kc-mission-v1", + "name": "argo-workflows-1758-feat-support-for-scheduled-workflows-with-cronworkflow-crd", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "argo-workflows: feat: Support for scheduled Workflows with CronWorkflow CRD", + "description": "Closes: https://github.com/argoproj/argo/issues/1229\n\n**Design**\n\nImplements a `CronWorkflow` CRD that defines a scheduled `Workflow`. The design is such that any `Workflow` can be simply converted to a `CronWorkflow` by replacing `kind: Workflow` with `kind: CronWorkflow` and adding some `CronWorkflowOptions` (in short: `CronWorkflow` = `Workflow` + `CronWorkflowOptions`).\n\nExample:\n```yaml\napiVersion: argoproj.io/v1alpha1\nkind: CronWorkflow\nmetadata:\n name: test-cron-wf\nspec:\n schedule: \"* *", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Closes: https://github.com/argoproj/argo/issues/1229\n\n**Design**\n\nImplements a `CronWorkflow` CRD that defines a scheduled `Workflow`. The design is such that any `Workflow` can be simply converted to" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\napiVersion: argoproj.io/v1alpha1\r\nkind: CronWorkflow\r\nmetadata:\r\n name: test-cron-wf\r\nspec:\r\n schedule: \"* * * * *\"\r\n concurrencyPolicy: \"Replace\"\r\n startingDeadlineSeconds: 0\r\n workflowSpec:\r\n entrypoint: whalesay\r\n templates:\r\n - name: whalesay\r\n container:\r\n image: alpine:3.6\r\n command: [sh, -c]\r\n args: [\"date; sleep 90\"]\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "The MVP is done and this PR should now be ready for review. 
Some usage examples:\n\n`cron.yaml`:\n```yaml\napiVersion: argoproj.io/v1alpha1\nkind: CronWorkflow\nmetadata:\n name: test-cron-wf\nspec:\n schedule: \"* * * * *\"\n concurrencyPolicy: \"Replace\"\n startingDeadlineSeconds: 0\n workflowSpec:\n entrypoint: whalesay\n templates:\n - name: whalesay\n container:\n image: alpine:3.6\n command: [sh, -c]\n args: [\"date; sleep 90\"]\n```\n```\n$ argo cron create cron.yaml\nName: test-cron-wf\nNamespace: argo\nCreated: Mon Nov 18 10:17:06 -0800 (now)\nSchedule: * * * * *\nSuspended: false\nStartingDeadlineSeconds: 0\nConcurrencyPolicy: Forbid\n\n$ argo cron list\nNAME AGE LAST RUN SCHEDULE SUSPENDED\ntest-cron-wf 49s N/A * * * * * false\n\n# some time passes\n\n$ argo cron list\nNAME AGE LAST RUN SCHEDULE SUSPENDED\ntest-cron-wf 56s 2s * * * * * false\n\n$ argo cron get test-cron-wf\nName: test-cron-wf\nNamespace: argo\nCreated: Mon Nov 18 10:17:06 -0800 (4 minutes ago)\nSchedule: * * * * *\nSuspended: false\nStartingDeadlineSeconds: 0\nConcurrencyPolicy: Replace\nLastScheduledTime: Mon Nov 18 10:21:00 -0800 (51 seconds ago)\nActive Workflows: test-cron-wf-r", + "codeSnippets": [ + "apiVersion: argoproj.io/v1alpha1\r\nkind: CronWorkflow\r\nmetadata:\r\n name: test-cron-wf\r\nspec:\r\n schedule: \"* * * * *\"\r\n concurrencyPolicy: \"Replace\"\r\n startingDeadlineSeconds: 0\r\n workflowSpec:\r\n entrypoint: whalesay\r\n templates:\r\n - name: whalesay\r\n container:\r\n image: alpine:3.6\r\n command: [sh, -c]\r\n args: [\"date; sleep 90\"]", + "apiVersion: argoproj.io/v1alpha1\r\nkind: CronWorkflow\r\nmetadata:\r\n name: test-cron-wf\r\nspec:\r\n schedule: \"* * * * *\"\r\n concurrencyPolicy: \"Replace\"\r\n startingDeadlineSeconds: 0\r\n workflowSpec:\r\n entrypoint: whalesay\r\n templates:\r\n - name: whalesay\r\n container:\r\n image: alpine:3.6\r\n command: [sh, -c]\r\n args: [\"date; sleep 90\"]", + "apiVersion: argoproj.io/v1alpha1\r\nkind: CronWorkflow\r\nmetadata:\r\n name: test-cron-wf\r\nspec:\r\n schedule: \"* * * * *\"\r\n concurrencyPolicy: \"Replace\"\r\n startingDeadlineSeconds: 0\r\n workflowSpec:\r\n entrypoint: whalesay\r\n templates:\r\n - name: whalesay\r\n container:\r\n image: alpine:3.6\r\n command: [sh, -c]\r\n args: [\"date; sleep 90\"]", + "$ argo cron create cron.yaml\r\nName: test-cron-wf\r\nNamespace: argo\r\nCreated: Mon Nov 18 10:17:06 -0800 (now)\r\nSchedule: * * * * *\r\nSuspended: false\r\nStartingDeadlineSeconds: 0\r\nConcurrencyPolicy: Forbid\r\n\r\n$ argo cron list\r\nNAME AGE LAST RUN SCHEDULE SUSPENDED\r\ntest-cron-wf 49s N/A * * * * * false\r\n\r\n# some time passes\r\n\r\n$ argo cron list\r\nNAME AGE LAST RUN SCHEDULE SUSPENDED\r\ntest-cron-wf 56s 2s * * * * * false\r\n\r\n$ argo cron get test-cron-wf\r\nName: test-cron-wf\r\nNamespace: argo\r\nCreated: Mon Nov 18 10:17:06 -0800 (4 minutes ago)\r\nSchedule: * * * * *\r\nSuspended: false\r\nStartingDeadlineSeconds: 0\r\nConcurrencyPolicy: Replace\r\nLastScheduledTime: Mon Nov 18 10:21:00 -0800 (51 seconds ago)\r\nActive Workflows: test-cron-wf-rt4nf", + "$ kubectl create -n argo rolebinding argo-cron --clusterrole=admin --serviceaccount=argo:argo" + ] + } + }, + "metadata": { + "tags": [ + "argo-workflows", + "graduated", + "app-definition", + "troubleshoot", + "area-cron-workflows" + ], + "cncfProjects": [ + "argo-workflows" + ], + "targetResourceKinds": [ + "Job", + "Cronjob" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": 
"https://github.com/argoproj/argo-workflows/pull/1758", + "repo": "https://github.com/argoproj/argo-workflows" + }, + "reactions": 31, + "comments": 24, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with argo-workflows installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:26:14.018Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo-workflows/argo-workflows-6282-feat-ui-inject-custom-css.json b/solutions/cncf-generated/argo-workflows/argo-workflows-6282-feat-ui-inject-custom-css.json new file mode 100644 index 00000000..e478fd1c --- /dev/null +++ b/solutions/cncf-generated/argo-workflows/argo-workflows-6282-feat-ui-inject-custom-css.json @@ -0,0 +1,84 @@ +{ + "version": "kc-mission-v1", + "name": "argo-workflows-6282-feat-ui-inject-custom-css", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "argo-workflows: feat(ui): Inject Custom CSS", + "description": "Checklist:\n\n* [x] My organization is added to [USERS.md](https://github.com/argoproj/argo-workflows/blob/master/USERS.md).\n\n\"Screen\n\nYou can test this feature by executing the following commands.\n\n```bash\n$ kubectl -n argo create configmap custom-css --from-literal \"custom.css=.nav-bar { background-color: #2196f3; }\"\n$ kubectl -n argo pa", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Checklist:\n\n* [x] My organization is added to [USERS.md](https://github.com/argoproj/argo-workflows/blob/master/USERS.md).\n\n\"Screen (ø)` | |\n| [pkg/apiclient/http1/info-service-client.go](https://codecov.io/gh/argoproj/argo-workflows/pull/6282/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-cGtnL2FwaWNsaWVudC9odHRwMS9pbmZvLXNlcnZpY2UtY2xpZW50Lmdv) | `0.00% <0.00%> (ø)` | |\n| [server/info/info\\_server.go](https://codecov.io/gh/argoproj/argo-workflows/pull/6282/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-c2VydmVyL2luZm8vaW5mb19zZXJ2ZXIuZ28=) | `47.82% <0.00%> (-10.07%)` | :arrow_down: |\n| [cmd/argo/commands/server.go](https://codecov.io/gh/argoproj/argo-workflows/pull/6282/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-Y21kL2FyZ28vY29tbWFuZHMvc2VydmVyLmdv) | `32.82% <50.00%> (+0.54%)` | :arrow_up: |\n\n------\n\n[Continue to review full report at Codecov](https://codecov.io/gh/argoproj/argo-workflows/pull/6282?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj).\n> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj)\n> `Δ = absolute (impact)`, `ø = not affected`, `? = missing data`\n> Powered by [Codecov](https://codecov.io/gh/argoproj/argo-workflows/pull/6282?src=pr&el=footer&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj). 
Last update [d7c0977...4f40b9e](https://codecov.io/gh/argoproj/argo-workflows/pull/6282?src=pr&el=lastupdated&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj).\n\nI updated the code so the server won't return 304 on removed extra files. \nI suggest doing this different.\r\n\r\n1. Add a new field to GetInfo called “env”. This can be “dev” , “e2e” or “prod”.\r\n2. Use this to load a CSS file based on the name. Eg dev.css\r\n3. Bundle CSS using those conventional names.\r\n4. Add a new parameter to Argo server, “—env” or “ARGO_ENV”.\r\n5. Display that value below the version number top left.\r\n\r\nThis would be less flexible, but flexible enough.\r\nIt would be secure.\r\nIt would be simple, I think we’d use it.\nThank you for your suggestion. I have 2 questions for the env idea. \n\nWhat env should we use for a locally running argo server?\n\nWe could have 2 argo servers with different instance ID in the production. Should we use different env names like prod_1 and prod_2?\nBy the way, as for the env-like name, I injected it by the following CSS." + ] + } + }, + "metadata": { + "tags": [ + "argo-workflows", + "graduated", + "app-definition", + "troubleshoot", + "area-ui", + "problem-stale", + "area-server" + ], + "cncfProjects": [ + "argo-workflows" + ], + "targetResourceKinds": [ + "Deployment", + "Configmap" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/argoproj/argo-workflows/pull/6282", + "repo": "https://github.com/argoproj/argo-workflows", + "pr": "https://github.com/argoproj/argo-workflows/pull/7387" + }, + "reactions": 10, + "comments": 11, + "synthesizedBy": "regex", + "qualityScore": 66 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with argo-workflows installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:26:17.207Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-10432-feat-multiple-sources-for-applications.json b/solutions/cncf-generated/argo/argo-10432-feat-multiple-sources-for-applications.json new file mode 100644 index 00000000..864d9abf --- /dev/null +++ b/solutions/cncf-generated/argo/argo-10432-feat-multiple-sources-for-applications.json @@ -0,0 +1,82 @@ +{ + "version": "kc-mission-v1", + "name": "argo-10432-feat-multiple-sources-for-applications", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "argo: feat: Multiple sources for applications", + "description": "This change enables users to provide multiple resources for the Application. The change aims to be fully backwards compatible.\n\nThis PR implements proposal https://github.com/argoproj/argo-cd/pull/8322.", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This change enables users to provide multiple resources for the Application. 
The change aims to be fully backwards compatible.\n\nThis PR implements proposal https://github.com/argoproj/argo-cd/pull/832" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nspec:\r\n source:\r\n repoURL: https://github.com/argoproj/argocd-example-apps.git\r\n path: guestbook\r\n targetRevision: HEAD\r\n sources:\r\n - chart: elasticsearch\r\n helm:\r\n valueFiles:\r\n - values.yaml\r\n repoURL: https://helm.elastic.co\r\n targetRevision: 7.6.0\r\n - repoURL: https://github.com/argoproj/argocd-example-apps.git\r\n path: guestbook\r\n targetRevision: HEAD\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/argoproj/argo-cd/pull/9609. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "This is just a draft PR to indicate that the work has started on the proposal implementation. \n**The PR is not ready for review.**\n\nNote on DCO:\n\nIf the DCO action in the integration test fails, one or more of your commits are not signed off. Please click on the *Details* link next to the DCO action for instructions on how to resolve this.\n\nChecklist:\n\n* [ ] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.\n* [ ] The title of the PR states what changed and the related issues number (used for the release note).\n* [ ] I've included \"Closes [ISSUE #]\" or \"Fixes [ISSUE #]\" in the description to automatically close the associated issue.\n* [ ] I've updated both the CLI and UI to expose my feature, or I plan to submit a second PR with them.\n* [ ] Does this PR require documentation updates?\n* [ ] I've updated documentation as required by this PR.\n* [ ] Optional. My organization is added to USERS.md.\n* [ ] I have signed off all my commits as required by [DCO](https://github.com/argoproj/argoproj/tree/master/community#contributing-to-argo)\n* [ ] I have written unit and/or e2e tests for my change. 
PRs without these are unlikely to be merged.\n* [ ] My build is green ([troubleshooting builds](https://argo-cd.readthedocs.io/en/latest/developer-guide/ci/)).", + "codeSnippets": [ + "spec:\r\n source:\r\n repoURL: https://github.com/argoproj/argocd-example-apps.git\r\n path: guestbook\r\n targetRevision: HEAD\r\n sources:\r\n - chart: elasticsearch\r\n helm:\r\n valueFiles:\r\n - values.yaml\r\n repoURL: https://helm.elastic.co\r\n targetRevision: 7.6.0\r\n - repoURL: https://github.com/argoproj/argocd-example-apps.git\r\n path: guestbook\r\n targetRevision: HEAD", + "spec:\r\n source:\r\n repoURL: https://github.com/argoproj/argocd-example-apps.git\r\n path: guestbook\r\n targetRevision: HEAD\r\n sources:\r\n - chart: elasticsearch\r\n helm:\r\n valueFiles:\r\n - values.yaml\r\n repoURL: https://helm.elastic.co\r\n targetRevision: 7.6.0\r\n - repoURL: https://github.com/argoproj/argocd-example-apps.git\r\n path: guestbook\r\n targetRevision: HEAD", + "spec:\r\n sources:\r\n - repoURL: https://github.com/my-org/my-repo # path is missing so no manifests are generated\r\n targetRevision: master\r\n ref: myRepo # repo is available via symlink \"myRepo\"\r\n - repoURL: https://github.com/helm/charts\r\n targetRevision: master\r\n path: incubator/elasticsearch # path \"incubator/elasticsearch\" is used to generate manifests\r\n helm:\r\n valueFiles:\r\n - $myRepo/values.yaml # values.yaml is located in source with reference name $myRepo", + "apiVersion: argoproj.io/v1alpha1\r\nkind: Application\r\nmetadata:\r\n name: guestbook\r\n namespace: argocd\r\n labels:\r\n argocd.argoproj.io/refresh: hard\r\nspec:\r\n project: default\r\n syncPolicy:\r\n automated:\r\n prune: true\r\n selfHeal: true\r\n destination:\r\n server: https://kubernetes.default.svc\r\n namespace: argocd\r\n sources:\r\n - chart: elasticsearch\r\n helm:\r\n valueFiles:\r\n - values.yaml\r\n repoURL: https://helm.elastic.co\r\n targetRevision: 7.6.0\r\n - repoURL: https://github.com/argoproj/argocd-example-apps.git\r\n path: guestbook\r\n targetRevision: HEAD\r\n - chart: elasticsearch\r\n helm:\r\n valueFiles:\r\n - values.yaml\r\n repoURL: https://helm.elastic.co\r\n targetRevision: 7.7.0", + "
\n\n[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/argoproj/argo-cd/pull/10432?dropdown=coverage&src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj). \n:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj).\n\nThanks for the PR! I'm looking forward to v2.5.0.\r\n\r\nJust a question about plugin, how will these remote sources work in case of using plugins? In our case, we use the helm-secrets plugin to fetch secrets directly from Vault by an argocd custom plugin. I wondered how I can integrate that with remote values files from git without doing so much manual scripting.\nThanks for the PR! \r\n\n> Thanks for the PR! I'm looking forward to v2.5.0.\r\n> \r\n> Just a question about plugin, how will these remote sources work in case of using plugins? In our case, we use the helm-secrets plugin to fetch secrets directly from Vault by an argocd custom plugin. I wondered how I can integrate that with remote values files from git without doing so much manual scripting.\r\n\r\nHi @amohamedhey, we are not going to support CMP with initial release of multiple sources in v2.5, but the feature would follow up as an extension to this feature in future releases.\nIt looks as if this addresses #677, yet I didn't find the issue mentioned here.\n@tback updated the PR description, thanks!\nI still have issues testing this code locally. What is the proper way to use the new `sources` field?\r\n\r\nIs it complementary to `source`, or is a `source` xor `sources` decision?" + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [ + "Namespace" + ], + "difficulty": "advanced", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/argoproj/argo-cd/pull/10432", + "repo": "https://github.com/argoproj/argo-cd", + "pr": "https://github.com/argoproj/argo-cd/pull/9609" + }, + "reactions": 147, + "comments": 85, + "synthesizedBy": "regex", + "qualityScore": 67 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with argo installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:25:35.220Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-11183-feat-appset-add-stringtemplate-field-to-spec.json b/solutions/cncf-generated/argo/argo-11183-feat-appset-add-stringtemplate-field-to-spec.json new file mode 100644 index 00000000..c355d192 --- /dev/null +++ b/solutions/cncf-generated/argo/argo-11183-feat-appset-add-stringtemplate-field-to-spec.json @@ -0,0 +1,78 @@ +{ + "version": "kc-mission-v1", + "name": "argo-11183-feat-appset-add-stringtemplate-field-to-spec", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "argo: feat(appset): Add stringTemplate field to spec", + "description": "Closes: #11213\n\nThis will introduce a new field `stringTemplate` that will allow a less restrictive templating of the Application to be generated as the current design is limited by field.\n\nRelated discussions:\n- https://github.com/argoproj/argo-cd/pull/10026#issuecomment-1236888623\n\n*This work was started in https://github.com/argoproj/argo-cd/pull/9873*\n\nNote on DCO:\n\nIf the DCO action in the integration test fails, one or more of your commits are not signed off. Please click on the *Details* ", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Closes: #11213\n\nThis will introduce a new field `stringTemplate` that will allow a less restrictive templating of the Application to be generated as the current design is limited by field.\n\nRelated di" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nHello @speedfl, thanks for the prompt reply,\r\n\r\n> @mrmm are you sure you want to create the method `renderWithGoTemplate`. Can the `Replace` in utils do the job ?\r\n\r\nYes totally, I have just tried to use the work of @rishabh625 (as it was his idea) but using `r.Replace` does the job (which I have updated in b91759c)\r\n\r\n> Concerning your tests as you are passing the `stringTemplate` you need to add test with `stringTemplate != nil` in the `utils_test.go`\r\n\r\nThanks for the pointer on how to test, \n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/argoproj/argo-cd/pull/9873. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Note on DCO:\n\nIf the DCO action in the integration test fails, one or more of your commits are not signed off. 
Please click on the *Details* link next to the DCO action for instructions on how to resolve this.\n\nChecklist:\n\n* [ ] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.\n* [ ] The title of the PR states what changed and the related issues number (used for the release note).\n* [ ] I've included \"Closes [ISSUE #]\" or \"Fixes [ISSUE #]\" in the description to automatically close the associated issue.\n* [ ] I've updated both the CLI and UI to expose my feature, or I plan to submit a second PR with them.\n* [ ] Does this PR require documentation updates?\n* [ ] I've updated documentation as required by this PR.\n* [ ] Optional. My organization is added to USERS.md.\n* [ ] I have signed off all my commits as required by [DCO](https://github.com/argoproj/argoproj/tree/master/community#contributing-to-argo)\n* [ ] I have written unit and/or e2e tests for my change. PRs without these are unlikely to be merged.\n* [ ] My build is green ([troubleshooting builds](https://argo-cd.readthedocs.io/en/latest/developer-guide/ci/)). \n\nThis is the PR moved from argoproj/applicationset \n\nhttps://github.com/argoproj/applicationset/pull/513\n\nThanks to @vavdoshka for all the effo", + "codeSnippets": [ + "Hello @speedfl, thanks for the prompt reply,\r\n\r\n> @mrmm are you sure you want to create the method `renderWithGoTemplate`. Can the `Replace` in utils do the job ?\r\n\r\nYes totally, I have just tried to use the work of @rishabh625 (as it was his idea) but using `r.Replace` does the job (which I have updated in b91759c)\r\n\r\n> Concerning your tests as you are passing the `stringTemplate` you need to add test with `stringTemplate != nil` in the `utils_test.go`\r\n\r\nThanks for the pointer on how to test,", + "Hello @speedfl, thanks for the prompt reply,\r\n\r\n> @mrmm are you sure you want to create the method `renderWithGoTemplate`. 
Can the `Replace` in utils do the job ?\r\n\r\nYes totally, I have just tried to use the work of @rishabh625 (as it was his idea) but using `r.Replace` does the job (which I have updated in b91759c)\r\n\r\n> Concerning your tests as you are passing the `stringTemplate` you need to add test with `stringTemplate != nil` in the `utils_test.go`\r\n\r\nThanks for the pointer on how to test, here is my try https://github.com/mrmm/argo-cd/blob/437a9a82fc40e857ec4373297d7a521684bbdc87/applicationset/utils/utils_test.go#L19 *~but I seem to be running into an Unmarshaling issue for some reason I couldn't find.~*\r\n\r\n\r\n(Please let me know if you don't have the time to check this I will gladly stop pinging here 🙏 )\r\n\r\n-----\r\n\r\n>", + "*Edit: found my unmarshal mistake in https://github.com/argoproj/argo-cd/pull/11183/commits/437a9a82fc40e857ec4373297d7a521684bbdc87*\n# [Codecov](https://codecov.io/gh/argoproj/argo-cd/pull/11183?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj) Report\nBase: **47.00**% // Head: **46.95**% // Decreases project coverage by **`-0.05%`** :warning:\n> Coverage data is based on head [(`fc26e2e`)](https://codecov.io/gh/argoproj/argo-cd/pull/11183?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj) compared to base [(`9fe4ad3`)](https://codecov.io/gh/argoproj/argo-cd/commit/9fe4ad3253840761e418e7c76e85ef090ffa73fd?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj).\n> Patch coverage: 58.82% of modified lines in pull request are covered.\n\n> :exclamation: Current head fc26e2e differs from pull request most recent head 21bbdcc. Consider uploading reports for the commit 21bbdcc to get more accurate results\n\n
Additional details and impacted files" + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/argoproj/argo-cd/pull/11183", + "repo": "https://github.com/argoproj/argo-cd", + "pr": "https://github.com/argoproj/argo-cd/pull/9873" + }, + "reactions": 24, + "comments": 16, + "synthesizedBy": "regex", + "qualityScore": 62 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with argo installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:25:59.250Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-11567-feat-appset-advanced-templating-for-applicationset.json b/solutions/cncf-generated/argo/argo-11567-feat-appset-advanced-templating-for-applicationset.json new file mode 100644 index 00000000..72a23b5a --- /dev/null +++ b/solutions/cncf-generated/argo/argo-11567-feat-appset-advanced-templating-for-applicationset.json @@ -0,0 +1,77 @@ +{ + "version": "kc-mission-v1", + "name": "argo-11567-feat-appset-advanced-templating-for-applicationset", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "argo: feat(appset): Advanced templating for ApplicationSet", + "description": "@crenshaw-dev I think you were interested by this one\n\nCloses https://github.com/argoproj/argo-cd/issues/9177\n\nMain purpose is to have an ApplicationSet Template as `map[string]interface{}` which can be fully templatable\n\n```yaml\napiVersion: argoproj.io/v1alpha1\nkind: ApplicationSet\nmetadata:\n name: guestbook\nspec:\n goTemplate: true\n generators:\n - list:\n elements:\n - cluster: engineering-dev\n url: https://kubernetes.default.svc\n automated: true\n prune: true\n ", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "@crenshaw-dev I think you were interested by this one\n\nCloses https://github.com/argoproj/argo-cd/issues/9177\n\nMain purpose is to have an ApplicationSet Template as `map[string]interface{}` which can " + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\napiVersion: argoproj.io/v1alpha1\r\nkind: ApplicationSet\r\nmetadata:\r\n name: guestbook\r\nspec:\r\n goTemplate: true\r\n generators:\r\n - list:\r\n elements:\r\n - cluster: engineering-dev\r\n url: https://kubernetes.default.svc\r\n automated: true\r\n prune: true\r\n - cluster: engineering-prod\r\n url: https://kubernetes.default.svc\r\n automated: true\r\n prune: false\r\n - cluster: engineering-debug\r\n url: https://kubernetes.default.svc\r\n aut\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Oh! 
But @mubarak-j, you could accomplish the same thing by doing this:\n\n```yaml\nkind: ApplicationSet\nspec:\n goTemplate: true\n template:\n spec:\n syncPolicy:\n '{{ ternary \"automated\" \"noAuto\" (eq (index .metadata.labels \"env\") \"staging\")}}': {}\n```\n\nMy bad, I was looking at the exact text of your example instead of the intention. :-)", + "codeSnippets": [ + "apiVersion: argoproj.io/v1alpha1\r\nkind: ApplicationSet\r\nmetadata:\r\n name: guestbook\r\nspec:\r\n goTemplate: true\r\n generators:\r\n - list:\r\n elements:\r\n - cluster: engineering-dev\r\n url: https://kubernetes.default.svc\r\n automated: true\r\n prune: true\r\n - cluster: engineering-prod\r\n url: https://kubernetes.default.svc\r\n automated: true\r\n prune: false\r\n - cluster: engineering-debug\r\n url: https://kubernetes.default.svc\r\n aut", + "apiVersion: argoproj.io/v1alpha1\r\nkind: ApplicationSet\r\nmetadata:\r\n name: guestbook\r\nspec:\r\n goTemplate: true\r\n generators:\r\n - list:\r\n elements:\r\n - cluster: engineering-dev\r\n url: https://kubernetes.default.svc\r\n automated: true\r\n prune: true\r\n - cluster: engineering-prod\r\n url: https://kubernetes.default.svc\r\n automated: true\r\n prune: false\r\n - cluster: engineering-debug\r\n url: https://kubernetes.default.svc\r\n automated: false\r\n prune: false\r\n template:\r\n metadata:\r\n name: '{{.cluster}}'\r\n spec:\r\n project: default\r\n source:\r\n repoURL: https://github.com/argoproj/argo-cd.git\r\n targetRevision: HEAD\r\n path: applicationset/examples/list-generator/guestbook/{{.cluster}}\r\n destination:\r\n server: '{{.url}}'\r\n namespace: guestbook\r\n syncPolicy:\r\n # If automated == true, it will generate a key 'automated' which is part of the Application Spec model. It will then be retained\r\n # If automated == false, it will generate a key 'noAuto' which is not part of the Application Spec model. It will then be ignored\r\n '{{ ternary \"automated\" \"noAuto\" .automated }}':\r\n # If prune == true, it will generate a key 'prune' which is part of the Application Spec model. It will then be retained\r\n # If prune == false, it will generate a key 'noprune' which is not part of the Application Spec model. 
It will then be ignored\r\n '{{ ternary \"prune\" \"noprune\" .prune }}': true", + "| [Impacted Files](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj) | Coverage Δ | |\n|---|---|---|\n| [applicationset/generators/duck\\_type.go](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-YXBwbGljYXRpb25zZXQvZ2VuZXJhdG9ycy9kdWNrX3R5cGUuZ28=) | `70.18% <0.00%> (ø)` | |\n| [applicationset/generators/pull\\_request.go](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-YXBwbGljYXRpb25zZXQvZ2VuZXJhdG9ycy9wdWxsX3JlcXVlc3QuZ28=) | `52.23% <0.00%> (ø)` | |\n| [applicationset/generators/scm\\_provider.go](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-YXBwbGljYXRpb25zZXQvZ2VuZXJhdG9ycy9zY21fcHJvdmlkZXIuZ28=) | `34.19% <0.00%> (ø)` | |\n| [.../apis/application/v1alpha1/applicationset\\_types.go](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-cGtnL2FwaXMvYXBwbGljYXRpb24vdjFhbHBoYTEvYXBwbGljYXRpb25zZXRfdHlwZXMuZ28=) | `29.26% <ø> (+0.69%)` | :arrow_up: |\n| [util/argo/argo.go](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-dXRpbC9hcmdvL2FyZ28uZ28=) | `64.40% <0.00%> (-2.06%)` | :arrow_down: |\n| [cmd/argocd/commands/applicationset.go](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-Y21kL2FyZ29jZC9jb21tYW5kcy9hcHBsaWNhdGlvbnNldC5nbw==) | `18.25% <33.66%> (+1.19%)` | :arrow_up: |\n| [...licationset/generators/generator\\_spec\\_processor.go](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-YXBwbGljYXRpb25zZXQvZ2VuZXJhdG9ycy9nZW5lcmF0b3Jfc3BlY19wcm9jZXNzb3IuZ28=) | `64.00% <60.00%> (-2.30%)` | :arrow_down: |\n| [applicationset/utils/utils.go](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-YXBwbGljYXRpb25zZXQvdXRpbHMvdXRpbHMuZ28=) | `61.99% <60.60%> (-13.60%)` | :arrow_down: |\n| [...cationset/controllers/applicationset\\_controller.go](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-YXBwbGljYXRpb25zZXQvY29udHJvbGxlcnMvYXBwbGljYXRpb25zZXRfY29udHJvbGxlci5nbw==) | `63.15% <100.00%> (-0.29%)` | :arrow_down: |\n| [applicationset/generators/cluster.go](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj#diff-YXBwbGljYXRpb25zZXQvZ2VuZXJhdG9ycy9jbHVzdGVyLmdv) | `80.27% <100.00%> (ø)` | |\n| ... 
and [4 more](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=tree-more&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj) | |\n\n\n
\n\n[:umbrella: View full report in Codecov by Sentry](https://codecov.io/gh/argoproj/argo-cd/pull/11567?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj). \n:loudspeaker: Do you have feedback about the report comment? [Let us know in this issue](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj).\n\n@speedfl you're a force of nature.\r\n\r\nI doubt I'll personally have time to review this in time for 2.6. But if the community likes the approach, I'd love to get it into 2.7.\n@crenshaw-dev can you explain how can we as community motivate you more to \"merge it asap as this is what we need\"? 😄\nInteresting approach. I think this is exactly what I need. Would we be to toggle `automated` by using only a single variable as in the example bellow?", + "@boedy yes. You Can take a look to examples and e2e\napologies if this repeating the same question, but can parameter values provided by various generators be used as a condition here, e.g labels provided by cluster generator?", + "> @crenshaw-dev can you explain how can we as community motivate you more to \"merge it asap as this is what we need\"? 😄\r\n\r\n@michalschott pay Intuit to clone me. ;-) But seriously, I'll be advocating with my PM to get my time assigned to this before 2.7. \r\n\r\n@mubarak-j templates outside of keys or string-type values are not part of this PR. For that, you'll need this: https://github.com/argoproj/argo-cd/pull/11183\nOh! But @mubarak-j, you could accomplish the same thing by doing this:" + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [ + "Namespace" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/argoproj/argo-cd/pull/11567", + "repo": "https://github.com/argoproj/argo-cd" + }, + "reactions": 31, + "comments": 23, + "synthesizedBy": "regex", + "qualityScore": 64 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with argo installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:25:53.035Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-14893-feat-appset-advanced-templating-using-templatepatch.json b/solutions/cncf-generated/argo/argo-14893-feat-appset-advanced-templating-using-templatepatch.json new file mode 100644 index 00000000..a1591017 --- /dev/null +++ b/solutions/cncf-generated/argo/argo-14893-feat-appset-advanced-templating-using-templatepatch.json @@ -0,0 +1,80 @@ +{ + "version": "kc-mission-v1", + "name": "argo-14893-feat-appset-advanced-templating-using-templatepatch", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "argo: feat(appset): Advanced Templating using templatePatch", + "description": "@crenshaw-dev a small proposal for `patchTemplate`\n\nCloses https://github.com/argoproj/argo-cd/issues/9177\n\nNote on DCO:\n\nIf the DCO action in the integration test fails, one or more of your commits are not signed off. 
Please click on the *Details* link next to the DCO action for instructions on how to resolve this.\n\nChecklist:\n\n* [x] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "@crenshaw-dev a small proposal for `patchTemplate`\n\nCloses https://github.com/argoproj/argo-cd/issues/9177\n\nNote on DCO:\n\nIf the DCO action in the integration test fails, one or more of your commits a" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\napiVersion: argoproj.io/v1alpha1\r\nkind: ApplicationSet\r\nmetadata:\r\n name: guestbook\r\nspec:\r\n goTemplate: true\r\n generators:\r\n - list:\r\n elements:\r\n - cluster: engineering-dev\r\n url: https://kubernetes.default.svc\r\n automated: true\r\n prune: true\r\n - cluster: engineering-prod\r\n url: https://kubernetes.default.svc\r\n automated: true\r\n prune: false\r\n - cluster: engineering-debug\r\n url: https://kubernetes.default.svc\r\n aut\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/argoproj/argo-cd/pull/11567. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "@crenshaw-dev I think you were interested by this one\n\nCloses https://github.com/argoproj/argo-cd/issues/9177\n\nMain purpose is to have an ApplicationSet Template as `map[string]interface{}` which can be fully templatable\n\n```yaml\napiVersion: argoproj.io/v1alpha1\nkind: ApplicationSet\nmetadata:\n name: guestbook\nspec:\n goTemplate: true\n generators:\n - list:\n elements:\n - cluster: engineering-dev\n url: https://kubernetes.default.svc\n automated: true\n prune: true\n - cluster: engineering-prod\n url: https://kubernetes.default.svc\n automated: true\n prune: false\n - cluster: engineering-debug\n url: https://kubernetes.default.svc\n automated: false\n prune: false\n template:\n metadata:\n name: '{{.cluster}}'\n spec:\n project: default\n source:\n repoURL: https://github.com/argoproj/argo-cd.git\n targetRevision: HEAD\n path: applicationset/examples/list-generator/guestbook/{{.cluster}}\n destination:\n server: '{{.url}}'\n namespace: guestbook\n syncPolicy:\n # If automated == true, it will generate a key 'automated' which is part of the Application Spec model. It will then be retained\n # If automated == false, it will generate a key 'noAuto' which is not part of the Application Spec model. 
It will then be ignored\n '{{ ternary \"automated\" \"noAuto\" .automated }}':", + "codeSnippets": [ + "apiVersion: argoproj.io/v1alpha1\r\nkind: ApplicationSet\r\nmetadata:\r\n name: guestbook\r\nspec:\r\n goTemplate: true\r\n generators:\r\n - list:\r\n elements:\r\n - cluster: engineering-dev\r\n url: https://kubernetes.default.svc\r\n automated: true\r\n prune: true\r\n - cluster: engineering-prod\r\n url: https://kubernetes.default.svc\r\n automated: true\r\n prune: false\r\n - cluster: engineering-debug\r\n url: https://kubernetes.default.svc\r\n aut", + "apiVersion: argoproj.io/v1alpha1\r\nkind: ApplicationSet\r\nmetadata:\r\n name: guestbook\r\nspec:\r\n goTemplate: true\r\n generators:\r\n - list:\r\n elements:\r\n - cluster: engineering-dev\r\n url: https://kubernetes.default.svc\r\n automated: true\r\n prune: true\r\n - cluster: engineering-prod\r\n url: https://kubernetes.default.svc\r\n automated: true\r\n prune: false\r\n - cluster: engineering-debug\r\n url: https://kubernetes.default.svc\r\n automated: false\r\n prune: false\r\n template:\r\n metadata:\r\n name: '{{.cluster}}'\r\n spec:\r\n project: default\r\n source:\r\n repoURL: https://github.com/argoproj/argo-cd.git\r\n targetRevision: HEAD\r\n path: applicationset/examples/list-generator/guestbook/{{.cluster}}\r\n destination:\r\n server: '{{.url}}'\r\n namespace: guestbook\r\n syncPolicy:\r\n # If automated == true, it will generate a key 'automated' which is part of the Application Spec model. It will then be retained\r\n # If automated == false, it will generate a key 'noAuto' which is not part of the Application Spec model. It will then be ignored\r\n '{{ ternary \"automated\" \"noAuto\" .automated }}':\r\n # If prune == true, it will generate a key 'prune' which is part of the Application Spec model. It will then be retained\r\n # If prune == false, it will generate a key 'noprune' which is not part of the Application Spec model. It will then be ignored\r\n '{{ ternary \"prune\" \"noprune\" .prune }}': true", + "\n\n[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/argoproj/argo-cd/pull/14893?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj). \n:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=argoproj).\n\nFor folks watching this issue: I really, really wanted to get it into 2.9 but ran out of time. We did manage to merge this slightly less-powerful tool, which should be sufficient for some use cases: https://github.com/argoproj/argo-cd/pull/14743\n@crenshaw-dev #14743 will save a lot of time. Thanks a lot. In parallel I will rebase this one and keep it ready for 2.10\nHiya, I am having a problem where if `goTemplate: true` is in there my second generator can't get values from my first.", + "The above works fine without `goTemplate: true` but with it I get", + "@sethgupton-mastery as you are using goTemplate you must use `testfiles-argocd/**/{{ .folderName }}/app.json`\nSweet. That fixed me up. I hadn't realized goTemplate was something that already existed and wasn't part of this PR.\r\nWith goTemplate set to true and me using goTemplate correctly... *drum roll*\r\n\r\nI was able to load a dynamic list of labels from my app.json file! Very exciting! 
Can't wait for this to get in.\r\n\r\nThanks speedfl.\nI just read through the docs section and this feature looks really cool and would help out a ton! :rocket: Is this slated for `2.10`?\nYep, slated for 2.10. Starting to review in earnest now. :-) \ni'm trying this feature using this article : https://medium.com/@geoffrey.muselli/argocd-multi-cluster-helm-charts-installation-in-mono-repo-0a406ff7c578\r\n\r\nbuilding an image based on 2.10, get for each time get Error from server (BadRequest): error when creating \"demo/argo-applicationset-template/applicationset-argo-monocluster.yaml\": ApplicationSet in version \"v1alpha1\" cannot be handled as a ApplicationSet: strict decoding error: unknown field \"Generator\"\nCan you open a bug ?\r\nPlease add as well the ApplicationSet you are trying to create and the logs from the appset controller ? You can tag me. I will try to have a look \n> Can you open a bug ? Please add as well the ApplicationSet you are trying to create and the logs from the appset controller ? You can tag me. I will try to have a look\r\n\r\nmade a mistake in my config , sorry\nit's working for multi cluster, i just make some little adjustement , but enable to use the feature flag enabled.\r\n\r\nthis is my applicationset, with that, i can get two clone on different cluster\r\n\r\n`\r\napiVersion: argoproj.io/v1alpha1\r\nkind: ApplicationSet\r\nmetadata:\r\n name: hello-world-clusters\r\n namespace: kube-infra\r\nspec:\r\n goTemplate: true\r\n generators:\r\n - matrix:\r\n generators:\r\n - clusters: {}\r\n \r\n - git:\r\n repoURL: git@github.com:didlawowo/pulumi.git\r\n revision: feat/crossplane-ml\r\n files:\r\n - path: 'continuous-delivery/clusters-apps/hello-world/.argocd.json'\r\n\r\n template:\r\n metadata:\r\n name: '{{ .name }}-{{ .path.basename }}'\r\n spec:\r\n project: default\r\n sources:\r\n - repoURL: git@github.com:didlawowo/pulumi.git\r\n targetRevision: '{{ dig \"clusters\" .name \"valuesRevision\" \"feat/crossplane-ml\" . }}'\r\n ref: values\r\n - repoURL: '{{ .source.repoURL }}'\r\n targetRevision: '{{ dig \"clusters\" .name \"chartRevision\" .source.targetRevision . }}'\r\n chart: '{{ default \"\" .source.chart }}'\r\n path: '{{ default \"\" .source.path }}'\r\n helm:\r\n releaseName: '{{ .path.basename }}'\r\n valueFiles:\r\n - $values/{{ .path.path }}/values.yaml\r\n - $values/{{ .path.path }}/values.{{ .name }}.yaml\r\n destination:\r\n server: https://kubernetes.default.svc\r\n namespace: '{{ .destination.namespace }}'\r\n syncPolicy:\r\n automated:\r\n prune: true\r\n syncOptions:\r\n - CreateNamespace={{ dig \"syncPolicy\" \"syncOptions\" \"CreateNamespace\" \"true\" . }}\r\n`\r\ni have tried many tips, but not working, any help was really usefull\nThis is a support question. Could you please ask it on the cncf slack. Section argo-cd-appset. Thanks 🙂\nThanks for implementing this.\r\n\r\nWas any consideration given to the multiple source use case? i.e. the patching of a source that is a list element under `sources`? It would be great if it behaved like a Kustomize strategic merge patch and appended the patched source rather than replacing the entire contents of `sources`. It would be even better if it could merge the list elements on a known key (perhaps `ref`) so we would only have to patch one field. 
Then the bulk of the template could be under `template` as an object rather than a giant string under `templatePatch`.\r\n\r\nAn example of a desired ApplicationSet that doesn't currently work:" + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/argoproj/argo-cd/pull/14893", + "repo": "https://github.com/argoproj/argo-cd", + "pr": "https://github.com/argoproj/argo-cd/pull/11567" + }, + "reactions": 55, + "comments": 19, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with argo installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:25:42.473Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-6280-feat-extra-helm-values-from-external-git-repo-5826.json b/solutions/cncf-generated/argo/argo-6280-feat-extra-helm-values-from-external-git-repo-5826.json new file mode 100644 index 00000000..7be73bd8 --- /dev/null +++ b/solutions/cncf-generated/argo/argo-6280-feat-extra-helm-values-from-external-git-repo-5826.json @@ -0,0 +1,75 @@ +{ + "version": "kc-mission-v1", + "name": "argo-6280-feat-extra-helm-values-from-external-git-repo-5826", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "argo: feat: Extra Helm values from external git repo #5826", + "description": "# Feature: External Helm values from git\n\nThis PR allows for external values.yaml from other git repos in a helm installation. \n\n**Sample application.yaml**\n\n```yaml\napiVersion: argoproj.io/v1alpha1\nkind: Application\nmetadata:\n name: external-test\n finalizers:\n - resources-finalizer.argocd.argoproj.io\nspec:\n project: default\n source:\n repoURL: https://charts.bitnami.com/bitnami\n targetRevision: 8.5.8\n helm:\n valueFiles:\n - values.yaml\n externalValueFiles:\n ", + "type": "deploy", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "# Feature: External Helm values from git\n\nThis PR allows for external values.yaml from other git repos in a helm installation. \n\n**Sample application.yaml**\n\n```yaml\napiVersion: argoproj.io/v1alpha1\nk" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\napiVersion: argoproj.io/v1alpha1\r\nkind: Application\r\nmetadata:\r\n name: external-test\r\n finalizers:\r\n - resources-finalizer.argocd.argoproj.io\r\nspec:\r\n project: default\r\n source:\r\n repoURL: https://charts.bitnami.com/bitnami\r\n targetRevision: 8.5.8\r\n helm:\r\n valueFiles:\r\n - values.yaml\r\n externalValueFiles:\r\n - repoURL: https://github.com/KaiReichart/argo-test-values.git\r\n targetRevision: main\r\n valueFiles:\r\n - values.yaml\r\n \n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "> @rouke-broersma this is exactly what the field `targetRevision` is for. 
You could have tags (or branches) in your repos that contain the version specific value files, and then reference these in your `application.yaml`.\n> So you can either reference `main` in `application.yaml` and manually keep your 2 repos in sync or reference a specific version branch and update `application.yaml` when you want to update the application.\n> \n> However this isn't a problem specific to this implementation, but rather to the whole approach of splitting chart and values into different repos.\n\nWe are planning to put the application.yaml and the values.yaml in the same repo. Argo could theoretically (perhaps argo already does this?) tag the application with the git revision it comes from (app-of-apps pattern). If I could then specify in the application something akin to\n\n```\nexternalValueFiles:\n- fromApplicationRepo: true\n withApplicationRepoRevision: true\n valueFiles:\n - values.yaml\n```\n\nThat would be epic and solve the problem of keeping them in sync (if you use the same repo for the application and values).\n\nI understand of course that this is not what you're contributing in the PR but do you think it would be a feasible solution to the sync issue?", + "codeSnippets": [ + "apiVersion: argoproj.io/v1alpha1\r\nkind: Application\r\nmetadata:\r\n name: external-test\r\n finalizers:\r\n - resources-finalizer.argocd.argoproj.io\r\nspec:\r\n project: default\r\n source:\r\n repoURL: https://charts.bitnami.com/bitnami\r\n targetRevision: 8.5.8\r\n helm:\r\n valueFiles:\r\n - values.yaml\r\n externalValueFiles:\r\n - repoURL: https://github.com/KaiReichart/argo-test-values.git\r\n targetRevision: main\r\n valueFiles:\r\n - values.yaml", + "apiVersion: argoproj.io/v1alpha1\r\nkind: Application\r\nmetadata:\r\n name: external-test\r\n finalizers:\r\n - resources-finalizer.argocd.argoproj.io\r\nspec:\r\n project: default\r\n source:\r\n repoURL: https://charts.bitnami.com/bitnami\r\n targetRevision: 8.5.8\r\n helm:\r\n valueFiles:\r\n - values.yaml\r\n externalValueFiles:\r\n - repoURL: https://github.com/KaiReichart/argo-test-values.git\r\n targetRevision: main\r\n valueFiles:\r\n - values.yaml\r\n chart: mysql\r\n destination:\r\n server: 'https://kubernetes.default.svc'\r\n namespace: default", + "That would be epic and solve the problem of keeping them in sync (if you use the same repo for the application and values).\r\n\r\nI understand of course that this is not what you're contributing in the PR but do you think it would be a feasible solution to the sync issue?\n@KaiReichart Did you want me to send you another PR to fix the codegen conflict since I caused it? We really need to coordinate this w/ approval and merge, or else it's going to keep cropping up as other PRs get merged. \n@tinkerborg sure please do, this is only going to be a case of running `make codegen` to generate the necessary files. \n> @tinkerborg sure please do, this is only going to be a case of running `make codegen` to generate the necessary files.\r\n\r\nHmm, actually your repo needs to be updated w/ upstream master. I tried fetching your branch, merging argoproj/master and running the codegen. The conflict resolved fine but the commit history on the PR was a mess. Might be easier for you to do it. 
This is going to need to be repeated every time an upstream change involves codegen, until this branch is merged though...\nI'll do it, but this is the 3rd time I'm doing this and none of the ArgoCD maintainers has even acknowledged this PR, so I'm a bit hesitant about always keeping it up to date with the generated code until the maintainers at least signal any kind of interest in this feature...\n> > @rouke-broersma this is exactly what the field `targetRevision` is for. You could have tags (or branches) in your repos that contain the version specific value files, and then reference these in your `application.yaml`.\r\n> > So you can either reference `main` in `application.yaml` and manually keep your 2 repos in sync or reference a specific version branch and update `application.yaml` when you want to update the application.\r\n> > However this isn't a problem specific to this implementation, but rather to the whole approach of splitting chart and values into different repos.\r\n> \r\n> We are planning to put the application.yaml and the values.yaml in the same repo. Argo could theoretically (perhaps argo already does this?) tag the application with the git revision it comes from (app-of-apps pattern). If I could then specify in the application something akin to\r\n> \r\n>" + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition", + "deploy" + ], + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [ + "Namespace" + ], + "difficulty": "advanced", + "issueTypes": [ + "deploy" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/argoproj/argo-cd/pull/6280", + "repo": "https://github.com/argoproj/argo-cd" + }, + "reactions": 144, + "comments": 61, + "synthesizedBy": "regex", + "qualityScore": 70 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with argo installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:25:36.785Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/argo/argo-9755-feat-applications-in-any-namespace.json b/solutions/cncf-generated/argo/argo-9755-feat-applications-in-any-namespace.json new file mode 100644 index 00000000..0fcda4c7 --- /dev/null +++ b/solutions/cncf-generated/argo/argo-9755-feat-applications-in-any-namespace.json @@ -0,0 +1,82 @@ +{ + "version": "kc-mission-v1", + "name": "argo-9755-feat-applications-in-any-namespace", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "argo: feat: Applications in any namespace", + "description": "This change enables Application resources to exist in any namespace allowed by configuration.\n\nThe feature is *not* enabled by default, and has to be explicitly enabled by the administrator (see below).\n\nThe change aims to be fully backwards compatible.\n\nThis is a rather large change. It comprises changes to the controller's reconciliation logic, the API server as well as changes to the CLI and the UI. 
I try to outline the changes and design decisions best I can in the below description.\n\nThis P", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This change enables Application resources to exist in any namespace allowed by configuration.\n\nThe feature is *not* enabled by default, and has to be explicitly enabled by the administrator (see below" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nhttps://argocd.example.com/applications/guestbook\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/argoproj/argo-cd/pull/6537. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "This PR is a PoC to prove that the proposal created at #6409 actually can be implemented with changes that are not too intrusive. \n\nDespite the PR looks big (>=48 files changes), most of the changes are about how things (e.g. application names) are referenced (e.g. application name). There's not much changes to complexity imho.\n\nThis shall not be merged. It serves merely as a demonstration.", + "codeSnippets": [ + "https://argocd.example.com/applications/guestbook", + "https://argocd.example.com/applications/guestbook", + "https://argocd.example.com/applications/argocd/guestbook", + "GET /api/v1/applications/guestbook?appNamespace=foo", + "argocd app get ns1/guestbook\r\nargocd app get ns2/guestbook\r\nargocd app get guestbook" + ] + } + }, + "metadata": { + "tags": [ + "argo", + "graduated", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "argo" + ], + "targetResourceKinds": [ + "Namespace" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/argoproj/argo-cd/pull/9755", + "repo": "https://github.com/argoproj/argo-cd", + "pr": "https://github.com/argoproj/argo-cd/pull/6537" + }, + "reactions": 37, + "comments": 48, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with argo installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:25:48.140Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/atlantis/atlantis-2002-fix-retry-files-requests-to-github.json b/solutions/cncf-generated/atlantis/atlantis-2002-fix-retry-files-requests-to-github.json new file mode 100644 index 00000000..f1f9f400 --- /dev/null +++ b/solutions/cncf-generated/atlantis/atlantis-2002-fix-retry-files-requests-to-github.json @@ -0,0 +1,78 @@ +{ + "version": "kc-mission-v1", + "name": "atlantis-2002-fix-retry-files-requests-to-github", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "atlantis: fix: retry /files/ requests to github", + "description": "Similar to #1131, we see this for /files/ too, resulting in a plan\nerror.", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Similar to #1131, we see this for /files/ too, resulting in a plan\nerror." 
+ }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nGET https://api.github.com/repos/{owner}/{repo}/pulls/{number}/files?per_page=300: 404 Not Found []\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/runatlantis/atlantis/pull/2013. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "This is a follow on to resolve similar issues to #1019.\n\nIn #1131 retries were added to GetPullRequest. And in #1810 a backoff was included.\n\nHowever, those only resolve one potential request at the very beginning of a PR creation. The other request that happens early on during auto-plan is one to ListFiles to detect the modified files. This too can sometimes result in a 404 due to async updates on the GitHub side.\n\n---\n\nMy team recently upgraded a few Atlantis instances that were pretty old. They didn't yet include the fixes described above.\n\nWe upgraded to v0.18.1. After upgrading we were hopeful our dev teams would be happy to know these race condition errors were a thing of the past. But in only a couple of days, we got another report!\n\nI was able to find an error log with the following message (org/repo/pr-number redacted):\n```\nGET https://api.github.com/repos/{owner}/{repo}/pulls/{number}/files?per_page=300: 404 Not Found []\n```\n\nAnd the following stacktrace:\n```\ngithub.com/runatlantis/atlantis/server/events.(*PullUpdater).updatePull\n\tgithub.com/runatlantis/atlantis/server/events/pull_updater.go:14\ngithub.com/runatlantis/atlantis/server/events.(*PlanCommandRunner).runAutoplan\n\tgithub.com/runatlantis/atlantis/server/events/plan_command_runner.go:77\ngithub.com/runatlantis/atlantis/server/events.(*PlanCommandRunner).Run\n\tgithub.com/runatlantis/atlantis/server/events/plan_command_runner.go:221\ngithub.com/runatlantis/atlantis/server/events.(*DefaultCommandRunner).RunAutoplan", + "codeSnippets": [ + "GET https://api.github.com/repos/{owner}/{repo}/pulls/{number}/files?per_page=300: 404 Not Found []", + "GET https://api.github.com/repos/{owner}/{repo}/pulls/{number}/files?per_page=300: 404 Not Found []", + "github.com/runatlantis/atlantis/server/events.(*PullUpdater).updatePull\r\n\tgithub.com/runatlantis/atlantis/server/events/pull_updater.go:14\r\ngithub.com/runatlantis/atlantis/server/events.(*PlanCommandRunner).runAutoplan\r\n\tgithub.com/runatlantis/atlantis/server/events/plan_command_runner.go:77\r\ngithub.com/runatlantis/atlantis/server/events.(*PlanCommandRunner).Run\r\n\tgithub.com/runatlantis/atlantis/server/events/plan_command_runner.go:221\r\ngithub.com/runatlantis/atlantis/server/events.(*DefaultCommandRunner).RunAutoplanCommand\r\n\tgithub.com/runatlantis/atlantis/server/events/command_runner.go:163" + ] + } + }, + "metadata": { + "tags": [ + "atlantis", + "sandbox", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "atlantis" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/runatlantis/atlantis/pull/2002", + "repo": "https://github.com/runatlantis/atlantis", + "pr": "https://github.com/runatlantis/atlantis/pull/2013" + }, + "reactions": 12, + "comments": 3, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + 
"prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with atlantis installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:39:07.898Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/atlantis/atlantis-2261-feat-stream-output-for-custom-workflows.json b/solutions/cncf-generated/atlantis/atlantis-2261-feat-stream-output-for-custom-workflows.json new file mode 100644 index 00000000..51d3a38f --- /dev/null +++ b/solutions/cncf-generated/atlantis/atlantis-2261-feat-stream-output-for-custom-workflows.json @@ -0,0 +1,78 @@ +{ + "version": "kc-mission-v1", + "name": "atlantis-2261-feat-stream-output-for-custom-workflows", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "atlantis: feat: stream output for custom workflows", + "description": "I noticed that in #1937 which originally added this feature (e.g. https://github.com/runatlantis/atlantis/pull/1937/files#diff-edf527ba8643ff7bfca5f560491ea7055af472f5d6f3bbda127f1776b63d4b06L179) that the documentation around setting up `terragrunt` for custom workflows removed the `-no-color` option.\n\nI'm not sure if this was by mistake, but to allow colorization in the in-browser terminal I've added `ansi.Strip()` for parsing all command output from the new runner abstraction. If this is out ", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "I noticed that in #1937 which originally added this feature (e.g. https://github.com/runatlantis/atlantis/pull/1937/files#diff-edf527ba8643ff7bfca5f560491ea7055af472f5d6f3bbda127f1776b63d4b06L179) tha" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nrepos:\r\n - id: \"/.*/\"\r\n workflow: terragrunt\r\n pre_workflow_hooks:\r\n - run: >\r\n terragrunt-atlantis-config generate\r\n --output atlantis.yaml\r\n --autoplan --automerge\r\nworkflows:\r\n terragrunt:\r\n plan:\r\n steps:\r\n - env:\r\n name: TERRAGRUNT_TFPATH\r\n command: 'echo \"terraform${ATLANTIS_TERRAFORM_VERSION}\"'\r\n - run: terragrunt plan -out=$PLANFILE\r\n - run: terragrunt show -json $PLANFILE > $SHOWFILE\r\n apply:\r\n step\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "@mustafa89 thanks for the details. Looking at the built-in terraform client, it seems to append `-input=false` to `plan` and `apply` commands.\n\nDoes it work for you if you add the same flags to your workflow? 
For example:\n\n```yaml\nworkflows:\n terragrunt:\n plan:\n steps:\n - env:\n name: TERRAGRUNT_TFPATH\n command: 'echo \"terraform${ATLANTIS_TERRAFORM_VERSION}\"'\n - run: terragrunt plan -input=false -no-color -out=$PLANFILE\n - run: terragrunt show -no-color -json $PLANFILE > $SHOWFILE\n apply:\n steps:\n - env:\n name: TERRAGRUNT_TFPATH\n command: 'echo \"terraform${ATLANTIS_TERRAFORM_VERSION}\"'\n - run: terragrunt apply -input=false $PLANFILE -no-color\n```\n\nIf so, then this is probably more of a documentation fix than something that can be done in code, since we're running arbitrary shell commands (with the `run:` config) and probably shouldn't be mangling those without the user's knowledge.", + "codeSnippets": [ + "repos:\r\n - id: \"/.*/\"\r\n workflow: terragrunt\r\n pre_workflow_hooks:\r\n - run: >\r\n terragrunt-atlantis-config generate\r\n --output atlantis.yaml\r\n --autoplan --automerge\r\nworkflows:\r\n terragrunt:\r\n plan:\r\n steps:\r\n - env:\r\n name: TERRAGRUNT_TFPATH\r\n command: 'echo \"terraform${ATLANTIS_TERRAFORM_VERSION}\"'\r\n - run: terragrunt plan -out=$PLANFILE\r\n - run: terragrunt show -json $PLANFILE > $SHOWFILE\r\n apply:\r\n step", + "repos:\r\n - id: \"/.*/\"\r\n workflow: terragrunt\r\n pre_workflow_hooks:\r\n - run: >\r\n terragrunt-atlantis-config generate\r\n --output atlantis.yaml\r\n --autoplan --automerge\r\nworkflows:\r\n terragrunt:\r\n plan:\r\n steps:\r\n - env:\r\n name: TERRAGRUNT_TFPATH\r\n command: 'echo \"terraform${ATLANTIS_TERRAFORM_VERSION}\"'\r\n - run: terragrunt plan -out=$PLANFILE\r\n - run: terragrunt show -json $PLANFILE > $SHOWFILE\r\n apply:\r\n steps:\r\n - env:\r\n name: TERRAGRUNT_TFPATH\r\n command: 'echo \"terraform${ATLANTIS_TERRAFORM_VERSION}\"'\r\n - run: terragrunt apply $PLANFILE", + "version: 3\r\nautomerge: true\r\ndelete_source_branch_on_merge: true\r\nprojects:\r\n - name: aibse\r\n dir: teams/aibse\r\n workflow: terragrunt\r\n terraform_version: v1.2.2\r\n autoplan:\r\n when_modified: [\"*.tf\", \"*.hcl\"]\r\n enabled: true\r\n - name: platform\r\n dir: teams/platform\r\n workflow: terragrunt\r\n terraform_version: v1.2.2\r\n autoplan:\r\n when_modified: [\"*.tf\", \"*.hcl\"]\r\n enabled: true\r\nworkflows:\r\n terragrunt:\r\n plan:\r\n steps:\r\n - env:\r\n name: TERRAGRUNT_TFPATH\r\n command: 'echo \"terraform${ATLANTIS_TERRAFORM_VERSION}\"'\r\n - run: terragrunt plan -out=$PLANFILE\r\n - run: terragrunt show -json $PLANFILE > $SHOWFILE\r\n apply:\r\n steps:\r\n - env:\r\n name: TERRAGRUNT_TFPATH\r\n command: 'echo \"terraform${ATLANTIS_TERRAFORM_VERSION}\"'\r\n - run: terragrunt apply $PLANFILE", + "# repos lists the config for specific repos.\r\nrepos:\r\n # id can either be an exact repo ID or a regex.\r\n # If using a regex, it must start and end with a slash.\r\n # Repo ID's are of the form {VCS hostname}/{org}/{repo name}, ex.\r\n # github.com/runatlantis/atlantis.\r\n - id: /.*/\r\n # branch is an regex matching pull requests by base branch\r\n # (the branch the pull request is getting merged into).\r\n # By default, all branches are matched\r\n branch: main\r\n # apply_requirements sets the Apply Requirements for all repos that match.\r\n apply_requirements: [mergeable, undiverged]\r\n # workflow sets the workflow for all repos that match.\r\n # This workflow must be defined in the workflows section.\r\n workflow: default\r\n # allowed_overrides specifies which keys can be overridden by this repo in\r\n # its atlantis.yaml file.\r\n allowed_overrides: [apply_requirements, workflow, 
delete_source_branch_on_merge]\r\n # allowed_workflows specifies which workflows the repos that match\r\n # are allowed to select.\r\n allowed_workflows: [default]\r\n # allow_custom_workflows defines whether this repo can define its own\r\n # workflows. If false (default), the repo can only use server-side defined\r\n # workflows.\r\n allow_custom_workflows: true\r\n # delete_source_branch_on_merge defines whether the source branch would be deleted on merge\r\n # If false (default), the source branch won't be deleted on merge\r\n delete_source_branch_on_merge: false\r\n # pre_workflow_hooks defines arbitrary list of scripts to execute before workflow execution.\r\n #pre_workflow_hooks:\r\n # - run: my-pre-workflow-hook-command arg1\r\n # post_workflow_hooks defines arbitrary list of scripts to execute after workflow execution.\r\n #post_workflow_hooks:\r\n # - run: my-post-workflow-hook-command arg1\r\n# workflows lists server-side custom workflows\r\nworkflows:\r\n default:\r\n plan:\r\n steps:\r\n - init\r\n - plan\r\n apply:\r\n steps: [apply]", + "steps:\r\n - env:\r\n name: TERRAGRUNT_TFPATH\r\n command: 'echo \"terraform${ATLANTIS_TERRAFORM_VERSION}\"'\r\n - env:\r\n name: TERRAGRUNT_AUTO_INIT\r\n value: false\r\n - run: terragrunt init -input=false -no-color\r\n - run: if [[ $WORKSPACE != \"default\" ]]; then terragrunt workspace select -no-color $WORKSPACE; fi\r\n - run: terragrunt validate -no-color\r\n - run: terragrunt plan -no-color -out=$PLANFILE\n@frank-bee thanks for the info. Out of curiosity, are you saying that these problems happen *only* when running Atlantis from this branch?\r\n\r\nJust trying to rule things out, since it's not really clear to me how the changes here could affect the locking behavior.\n@frank-bee if there is something kindly update here. This PR missed the pre release and we would very much like to have it.\n@ascandella and @mustafa89 I have no updates here. Also cannot reproduce it at the moment because I switched to the released atlantis and do not work with terragrunt anymore.\r\nIf nobody else faced thesee kind of problems, please merge and close this PR!\n@jamengual can we get this into the next release? Our team would love to start using this feature\n> @jamengual can we get this into the next release? Our team would love to start using this feature\r\n\r\nI'm not the only one that needs to review the code, so we need to wait for someone else to review it.\n@jamengual who else can review? This PR's been sitting for a bit and I know folks have been waiting for this feature.\nI was testing this in the pre-release and it seems the same problem @frank-bee has raised happened to us. I saw multiple MRs where one of the workspaces started showing the same" + ] + } + }, + "metadata": { + "tags": [ + "atlantis", + "sandbox", + "app-definition", + "troubleshoot", + "feature", + "waiting-on-review", + "terragrunt" + ], + "cncfProjects": [ + "atlantis" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/runatlantis/atlantis/pull/2261", + "repo": "https://github.com/runatlantis/atlantis" + }, + "reactions": 17, + "comments": 25, + "synthesizedBy": "regex", + "qualityScore": 60 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with atlantis installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:39:00.109Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/backstage/backstage-6341-techdocs-use-an-event-stream-for-the-sync-endpoint-to-give-access.json b/solutions/cncf-generated/backstage/backstage-6341-techdocs-use-an-event-stream-for-the-sync-endpoint-to-give-access.json new file mode 100644 index 00000000..f4259286 --- /dev/null +++ b/solutions/cncf-generated/backstage/backstage-6341-techdocs-use-an-event-stream-for-the-sync-endpoint-to-give-access.json @@ -0,0 +1,81 @@ +{ + "version": "kc-mission-v1", + "name": "backstage-6341-techdocs-use-an-event-stream-for-the-sync-endpoint-to-give-access", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "backstage: [TechDocs] Use an event-stream for the sync endpoint to give access to the logs of the sync process", + "description": "We are working on the UX/DX of the \"basic\" architecture of TechDocs (see also https://github.com/backstage/backstage/pull/6263). Our goal is to have the option to run the docs generation in Backstage instead of in the CI. We did some work to create a trusted execution environment for the generation, but we want the user to be able to access the logs of the process. Which is the scope of this PR 😀.\n\nThis PR includes multiple changes. Each is part of individual commits, so it might be easier to r", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "We are working on the UX/DX of the \"basic\" architecture of TechDocs (see also https://github.com/backstage/backstage/pull/6263). Our goal is to have the option to run the docs generation in Backstage " + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nhttps://github.com/backstage/example/tree/main/\r\n |- catalog-info.yaml\r\n | > apiVersion: backstage.io/v1alpha1\r\n | > kind: Component\r\n | > metadata:\r\n | > name: example\r\n | > annotations:\r\n | > backstage.io/techdocs-ref: . # -> same folder\r\n | > spec: {}\r\n |- docs/\r\n |- mkdocs.yml\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/backstage/backstage/pull/6495. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "> This description was the initial draft. See the changeset for more details about the scope of the change.\n\nRelates to #4409\n\nThis PR is an outcome of the discussions around #6263. We want to make it easy to reference TechDocs documentation that is stored in the same folder as the entity definition. I first proposed `url:.`, which was a bit weird. Then @iamEAP proposed to stick with `dir:.`. Finally, @freben noted that it would also be cool to just write `backstage.io/techdocs-ref: .`. After some experimenting, I figured out that `backstage.io/techdocs-ref: ./` is just an alias for the longer `backstage.io/techdocs-ref: dir:./`. And that's what I implemented here 😀.\n\nI cleaned up the original `dir` implementation to only support `url` and `file` targets. When we also drop support for the deprecated `github`, ... annotations we can also delete a lot of now unneeded helper functions. 
Note that `backstage.io/techdocs-ref: https://github.com...` is **not** supported and will throw an exception that targets the user to use `url:https://github.com...` instead.\n\nI didn't change any documentation or examples yet. But I could add it to this PR, or we do a followup that focuses on this part.\n\n---\n\nIntroduce the annotation `backstage.io/techdocs-ref: ` as an alias for `backstage.io/techdocs-ref: dir:`. This annotation works with both the basic and the recommended flow, however, it will be most useful with the basic approach.\n\nIn addition,", + "codeSnippets": [ + "https://github.com/backstage/example/tree/main/\r\n |- catalog-info.yaml\r\n | > apiVersion: backstage.io/v1alpha1\r\n | > kind: Component\r\n | > metadata:\r\n | > name: example\r\n | > annotations:\r\n | > backstage.io/techdocs-ref: . # -> same folder\r\n | > spec: {}\r\n |- docs/\r\n |- mkdocs.yml", + "https://github.com/backstage/example/tree/main/\r\n |- catalog-info.yaml\r\n | > apiVersion: backstage.io/v1alpha1\r\n | > kind: Component\r\n | > metadata:\r\n | > name: example\r\n | > annotations:\r\n | > backstage.io/techdocs-ref: . # -> same folder\r\n | > spec: {}\r\n |- docs/\r\n |- mkdocs.yml", + "https://bitbucket.org/my-owner/my-project/src/master/\r\n |- catalog-info.yaml\r\n | > apiVersion: backstage.io/v1alpha1\r\n | > kind: Component\r\n | > metadata:\r\n | > name: example\r\n | > annotations:\r\n | > backstage.io/techdocs-ref: ./some-folder # -> subfolder\r\n | > spec: {}\r\n |- some-folder/\r\n |- docs/\r\n |- mkdocs.yml", + "https://dev.azure.com/organization/project/_git/repository\r\n |- my-1st-module/\r\n |- catalog-info.yaml\r\n | > apiVersion: backstage.io/v1alpha1\r\n | > kind: Component\r\n | > metadata:\r\n | > name: my-1st-module\r\n | > annotations:\r\n | > backstage.io/techdocs-ref: . # -> same folder\r\n | > spec: {}\r\n |- docs/\r\n |- mkdocs.yml\r\n |- my-2nd-module/\r\n |- catalog-info.yaml\r\n | > apiVersion: backstage.io/v1alpha1\r\n | > kind: Component\r\n | > metadata:\r\n | > name: my-2nd-module\r\n | > annotations:\r\n | > backstage.io/techdocs-ref: . # -> same folder\r\n | > spec: {}\r\n |- docs/\r\n |- mkdocs.yml\r\n |- catalog-info.yaml\r\n | > apiVersion: backstage.io/v1alpha1\r\n | > kind: Location\r\n | > metadata:\r\n | > name: example\r\n | > spec:\r\n | > targets:\r\n | > - ./*/catalog-info.yaml" + ] + } + }, + "metadata": { + "tags": [ + "backstage", + "incubating", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "backstage" + ], + "targetResourceKinds": [ + "Namespace" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/backstage/backstage/pull/6341", + "repo": "https://github.com/backstage/backstage", + "pr": "https://github.com/backstage/backstage/pull/6495" + }, + "reactions": 8, + "comments": 7, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with backstage installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:36:16.390Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/bank-vaults/bank-vaults-1613-operator-filter-vault-operator-cache-to-reduce-memory-usage.json b/solutions/cncf-generated/bank-vaults/bank-vaults-1613-operator-filter-vault-operator-cache-to-reduce-memory-usage.json new file mode 100644 index 00000000..17f58be3 --- /dev/null +++ b/solutions/cncf-generated/bank-vaults/bank-vaults-1613-operator-filter-vault-operator-cache-to-reduce-memory-usage.json @@ -0,0 +1,83 @@ +{ + "version": "kc-mission-v1", + "name": "bank-vaults-1613-operator-filter-vault-operator-cache-to-reduce-memory-usage", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "bank-vaults: operator: Filter vault-operator cache to reduce memory usage", + "description": "| Q | A\n| --------------- | ---\n| Bug fix? | yes\n| New feature? | no\n| API breaks? | no\n| Deprecations? | no\n| Related tickets | fixes #1288 \n| License | Apache 2.0\n\n### What's in this PR?\nUpgrade controller-runtime to latest release (v0.11.2) and filter cache to only target those resources created by the controller/labeled with common keys. `app.kubernetes.io/name in (vault, vault-configurator)`\n\n### Why?\nIn clusters with large number of configmaps, vault-o", + "type": "analyze", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "| Q | A\n| --------------- | ---\n| Bug fix? | yes\n| New feature? | no\n| API breaks? | no\n| Deprecations? | no\n| Related tickets | fixes #1288 \n| License | Apache 2" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n# k top pods -n keos-core vault-operator-86b8bf4596-gqpgr\r\nNAME CPU(cores) MEMORY(bytes) \r\nvault-operator-86b8bf4596-gqpgr 85m 2091Mi\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/bank-vaults/bank-vaults/pull/1621. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "| Q | A\n| --------------- | ---\n| Bug fix? | yes\n| New feature? | no\n| API breaks? | no\n| Deprecations? 
| no\n| Related tickets | fixes #1581\n| License | Apache 2.0\n\n### What's in this PR?\nConvert `map[interface{}]interface{}` to `map[string]interface{}` before sending the config to Vault API.\nThat's because the config data can have a sub dict (like `provider_config` in JWT/OIDC).\n\nWithout this conversion, Vault API will retrun the following error:\n```\njson: unsupported type: map[interface {}]interface {}\n```\n\n### Why?\nThis issue has been already fixed in #1247 however, it was removed (unintentionally) in the refactoring of v1.5 which caused #1581.\n\n### Additional context\n\n### Checklist", + "codeSnippets": [ + "# k top pods -n keos-core vault-operator-86b8bf4596-gqpgr\r\nNAME CPU(cores) MEMORY(bytes) \r\nvault-operator-86b8bf4596-gqpgr 85m 2091Mi", + "# k top pods -n keos-core vault-operator-86b8bf4596-gqpgr\r\nNAME CPU(cores) MEMORY(bytes) \r\nvault-operator-86b8bf4596-gqpgr 85m 2091Mi", + "# k top pods -n keos-core vault-operator-65d6c98759-wvbwp \r\nNAME CPU(cores) MEMORY(bytes) \r\nvault-operator-65d6c98759-wvbwp 2m 33Mi", + "json: unsupported type: map[interface {}]interface {}", + "+ kubectl get pods -A\r\nNAMESPACE NAME READY STATUS RESTARTS AGE\r\ndefault vault-0 3/3 Running 0 31s\r\ndefault vault-configurer-6947959c6f-9tz4z 0/1 ContainerCreating 0 0s\r\ndefault vault-configurer-6f7fbd89b8-qk2fz 0/1 CrashLoopBackOff 2 (14s ago) 31s\r\ndefault vault-operator-7bb56[86](https://github.com/banzaicloud/bank-vaults/runs/6428037465?check_suite_focus=true#step:8:86)d9b-mdh6s 1/1 Running 0 3m36s" + ] + } + }, + "metadata": { + "tags": [ + "bank-vaults", + "sandbox", + "app-definition", + "analyze" + ], + "cncfProjects": [ + "bank-vaults" + ], + "targetResourceKinds": [ + "Pod", + "Configmap" + ], + "difficulty": "intermediate", + "issueTypes": [ + "analyze" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/bank-vaults/bank-vaults/pull/1613", + "repo": "https://github.com/bank-vaults/bank-vaults", + "pr": "https://github.com/bank-vaults/bank-vaults/pull/1621" + }, + "reactions": 4, + "comments": 12, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with bank-vaults installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:39:12.959Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/buildpacks/buildpacks-2086-implementation-of-the-multi-platform-support-for-builders-and-bu.json b/solutions/cncf-generated/buildpacks/buildpacks-2086-implementation-of-the-multi-platform-support-for-builders-and-bu.json new file mode 100644 index 00000000..d95b6fb1 --- /dev/null +++ b/solutions/cncf-generated/buildpacks/buildpacks-2086-implementation-of-the-multi-platform-support-for-builders-and-bu.json @@ -0,0 +1,82 @@ +{ + "version": "kc-mission-v1", + "name": "buildpacks-2086-implementation-of-the-multi-platform-support-for-builders-and-bu", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "buildpacks: Implementation of the multi-platform support for builders and buildpack packages RFC 0128", + "description": "The purpose of this PR is to implement the RFC [0128](https://github.com/buildpacks/rfcs/blob/main/text/0128-multiarch-builders-and-package.md). 
\n\nIt adds the capability to the `pack buildpack package` and `pack builder create` to generate multi-platform OCI images and create an image index to combine them.\n\nA draft version of this PR was demo during KubeCon EU 24. See the recording [here](https://youtu.be/cenTw6W", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "The purpose of this PR is to implement the RFC [0128](https://github.com/buildpacks/rfcs/blob/main/text/0128-multiarch-builders-and-package.md). \n\nIt adds the capability to the `pack buildpack package" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n| [Flag](https://app.codecov.io/gh/buildpacks/pack/pull/2086/flags?src=pr&el=flags&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=buildpacks) | Coverage Δ | |\n|---|---|---|\n| [os_linux](https://app.codecov.io/gh/buildpacks/pack/pull/2086/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=buildpacks) | `74.93% <21.32%> (-3.69%)` | :arrow_down: |\n| [os_macos](https://app.codecov.io/gh/buildpa\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/paketo-buildpacks/syft/pull/204. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "This PR updates the pipeline builder create-package workflow automation to support multi-arch builds of this buildpack (ARM64 and AMD64). **The ARM64 image should be considered experimental for now**, since the official [Paketo RFC](https://github.com/paketo-buildpacks/rfcs/pull/288) and [CNB RFCs](https://github.com/buildpacks/rfcs/pull/295) related to multi-arch have not been merged in yet.\nIt uses an **experimental version of the pack CLI** (https://github.com/buildpacks/pack/pull/2086), and is based off of the upstream CNB RFC that is currently IN PROGRESS. 
This workflow will definitely be subject to change when an official `pack` release comes out, and if there are any changes to the upstream RFC.\nOnce we have merged this and seen it work on `main`, we will contribute it into the upstream pipeline-builder repository.\n(@sophiewigmore )\n\n## Checklist\n\n* [ ] I have viewed, signed, and submitted the Contributor License Agreement.\n* [ ] I have linked issue(s) that this PR should close using keywords or the Github UI (See [docs](https://docs.github.com/en/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue))\n* [ ] I have added an integration test, if necessary.\n* [ ] I have reviewed the [styleguide](https://github.com/paketo-buildpacks/community/blob/main/STYLEGUIDE.md) for guidance on my code quality.\n* [ ] I'm happy with the commit history on this PR (I have rebased/squashed as needed).", + "codeSnippets": [ + "| [Flag](https://app.codecov.io/gh/buildpacks/pack/pull/2086/flags?src=pr&el=flags&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=buildpacks) | Coverage Δ | |\n|---|---|---|\n| [os_linux](https://app.codecov.io/gh/buildpacks/pack/pull/2086/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=buildpacks) | `74.93% <21.32%> (-3.69%)` | :arrow_down: |\n| [os_macos](https://app.codecov.io/gh/buildpa", + "| [Flag](https://app.codecov.io/gh/buildpacks/pack/pull/2086/flags?src=pr&el=flags&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=buildpacks) | Coverage Δ | |\n|---|---|---|\n| [os_linux](https://app.codecov.io/gh/buildpacks/pack/pull/2086/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=buildpacks) | `74.93% <21.32%> (-3.69%)` | :arrow_down: |\n| [os_macos](https://app.codecov.io/gh/buildpacks/pack/pull/2086/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=buildpacks) | `72.72% <20.28%> (-3.58%)` | :arrow_down: |\n| [os_windows](https://app.codecov.io/gh/buildpacks/pack/pull/2086/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=buildpacks) | `75.40% <21.32%> (-3.68%)` | :arrow_down: |\n| [unit](https://app.codecov.io/gh/buildpacks/pack/pull/2086/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=buildpacks) | `75.98% <21.32%> (-3.71%)` | :arrow_down: |\n\nFlags with carried forward coverage won't be shown. 
[Click here](https://docs.codecov.io/docs/carryforward-flags?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=buildpacks#carryforward-flags-in-the-pull-request-comment) to find out more.\n\n\nHello 👋\r\nWe have been testing the latest version", + "from \r\nhttps://github.com/buildpacks/pack/actions/runs/8118576298\r\n\r\nWe had success running, from the ~/buildpack directory, using:", + "with a layout similar to this:", + "and we successfully obtained a multi arch image:" + ] + } + }, + "metadata": { + "tags": [ + "buildpacks", + "incubating", + "app-definition", + "troubleshoot", + "type-enhancement", + "type-chore" + ], + "cncfProjects": [ + "buildpacks" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/buildpacks/pack/pull/2086", + "repo": "https://github.com/buildpacks/pack", + "pr": "https://github.com/paketo-buildpacks/syft/pull/204" + }, + "reactions": 2, + "comments": 16, + "synthesizedBy": "regex", + "qualityScore": 62 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with buildpacks installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:36:35.338Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cedar/cedar-123-resolve-issue-114-expose-evaluationerror.json b/solutions/cncf-generated/cedar/cedar-123-resolve-issue-114-expose-evaluationerror.json new file mode 100644 index 00000000..a46086a3 --- /dev/null +++ b/solutions/cncf-generated/cedar/cedar-123-resolve-issue-114-expose-evaluationerror.json @@ -0,0 +1,80 @@ +{ + "version": "kc-mission-v1", + "name": "cedar-123-resolve-issue-114-expose-evaluationerror", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "cedar: Resolve: Issue 114 Expose-EvaluationError", + "description": "## Reviewed by Kesha Hietala (khieta)\n\nSo, after some digging, I found the EvaluationError enum in `cedar-policy-core/src/evaluator/err.rs`‎ which is imported by the `cedar/cedar-policy-core/src/evaluator.rs` on lines 25-27.\n\nI haven't tested this Idea but using that information and the TODO I arrived at a solution.\n> /// TODO in the future this can/should be the actual Core `EvaluationError\n\n[!] There's an unfinished comment on line 447 of api.rs I meant to say something like `[+] Modified to R", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "## Reviewed by Kesha Hietala (khieta)\n\nSo, after some digging, I found the EvaluationError enum in `cedar-policy-core/src/evaluator/err.rs`‎ which is imported by the `cedar/cedar-policy-core/src/evalu" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n### Additional changes\r\n\r\nThere are 8 references to EvaluationError that would need to be changed.\r\nHere's an example using line 121-132 from cedar-policy/src/api.rs.\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/cedar-policy/cedar/pull/186. Review the changes to understand the solution." 
+ }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "## Description of changes\n\nThis PR adds the `AuthorizationError` type discussed in PR #123. I argue that this is a non-breaking change because errors returned by authorization were never truly exposed to users of `cedar-policy` -- they were (and still are) exposed only through a `String`. This PR slightly modifies the `String` that users will see for certain types of errors.\n\nIn particular, where before they may have seen a message like\n```\nwhile evaluating policy policy0, encountered the following error: type error: expected bool, got long\n```\nThey will now see\n```\nerror occurred while evaluating policy `policy0`: type error: expected bool, got long\n```\n\nThe reason for the large diff is that we rely heavily on this type of error message in our integration tests.\n\nI recommend that this PR be included in our next patch release.\n\n## Issue #, if available\n\nRelated to #114\n\n## Checklist for requesting a review\n\nThe change in this PR is (choose one, and delete the other options):\n\nI confirm that this PR (choose one, and delete the other options):\n\nI confirm that [`cedar-spec`](https://github.com/cedar-policy/cedar-spec) (choose one, and delete the other options):", + "codeSnippets": [ + "### Additional changes\r\n\r\nThere are 8 references to EvaluationError that would need to be changed.\r\nHere's an example using line 121-132 from cedar-policy/src/api.rs.", + "### Additional changes\r\n\r\nThere are 8 references to EvaluationError that would need to be changed.\r\nHere's an example using line 121-132 from cedar-policy/src/api.rs.", + "## Further changes\r\n`cedar-policy-core/src/authorizer.rs`\r\nThere are a few `Vec` calls that need to be updated.\r\nFor example, in the Diagnostics struct.\r\n- We need to update the Diagnostics struct to use `Vec` for the errors field instead of `Vec`", + "- Modify the Response struct's 'new' function to accept a `Vec` for the errors parameter and update the construction of diagnostics.", + "The Response struct will now use a `Vec` for the errors field within the Diagnostics struct. It's important to \r\nupdate any code that creates a new Response instance to pass a `Vec` for the errors parameter.\n## Description of changes\r\n\r\nThis PR adds the `AuthorizationError` type discussed in PR #123. I argue that this is a non-breaking change because errors returned by authorization were never truly exposed to users of `cedar-policy` -- they were (and still are) exposed only through a `String`. 
This PR slightly modifies the `String` that users will see for certain types of errors.\r\n\r\nIn particular, where before they may have seen a message like" + ] + } + }, + "metadata": { + "tags": [ + "cedar", + "sandbox", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "cedar" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/cedar-policy/cedar/pull/123", + "repo": "https://github.com/cedar-policy/cedar", + "pr": "https://github.com/cedar-policy/cedar/pull/186" + }, + "reactions": 0, + "comments": 29, + "synthesizedBy": "regex", + "qualityScore": 64 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with cedar installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:39:20.434Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cert-manager/cert-manager-2840-wip-implement-route-controller.json b/solutions/cncf-generated/cert-manager/cert-manager-2840-wip-implement-route-controller.json new file mode 100644 index 00000000..6085977d --- /dev/null +++ b/solutions/cncf-generated/cert-manager/cert-manager-2840-wip-implement-route-controller.json @@ -0,0 +1,109 @@ +{ + "version": "kc-mission-v1", + "name": "cert-manager-2840-wip-implement-route-controller", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "cert-manager: WIP Implement Route Controller", + "description": "**What this PR does / why we need it**:\nThis implements the route controller for openshift routes and allows annotating a route to apply the certificate from the provided secret. If the route API is not found it disables the controller\n\n**Which issue this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close that issue when PR gets merged)*: fixes #1064\n\n**Special notes for your reviewer**:\n\n**Release note**:", + "type": "deploy", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "**What this PR does / why we need it**:\nThis implements the route controller for openshift routes and allows annotating a route to apply the certificate from the provided secret. If the route API is " + }, + { + "title": "registering the controllers to the cainjectior manager, need help with that", + "description": "registering the controllers to the cainjectior manager, need help with that" + }, + { + "title": "disabling the route injection controller if routes are not available need .1 ...", + "description": "disabling the route injection controller if routes are not available need .1 first" + }, + { + "title": "verify where I added some needed constants (controller/util.go)", + "description": "verify where I added some needed constants (controller/util.go)" + }, + { + "title": "unit tests, I didn't have any in my code", + "description": "unit tests, I didn't have any in my code" + }, + { + "title": "documentation.", + "description": "documentation." 
+ }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nSigned-off-by: raffaelespazzoli \r\n\r\n\r\n\r\n**What this PR does / why we need it**:\r\nadds injection to openshift routes\r\nadds ability to present certificates as keystores and javastores\r\n\r\n**Which issue this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close that issue when PR gets merged)*: fixes #1064 \r\n\r\n**Special notes for your reviewer**:\r\nThe following is missing:\r\n1. registering the controllers to the cainjectior manager, need help with that\r\n2. disabling the route injection controller if routes are not available need .1 first\r\n3. verify where I added some needed constants (controller/util.go)\r\n4. unit tests, I didn't have any in my code\r\n5. documentation.\r\n\r\n**Release note**:\r\n" + ] + } + }, + "metadata": { + "tags": [ + "cert-manager", + "graduated", + "security", + "deploy", + "do-not-merge-release-note-label-needed", + "needs-rebase", + "do-not-merge-work-in-progress", + "do-not-merge-hold", + "size-xxl", + "dco-signoff--yes", + "area-testing", + "ok-to-test", + "area-deploy", + "needs-kind" + ], + "cncfProjects": [ + "cert-manager" + ], + "targetResourceKinds": [ + "Secret" + ], + "difficulty": "advanced", + "issueTypes": [ + "deploy" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/cert-manager/cert-manager/pull/2840", + "repo": "https://github.com/cert-manager/cert-manager", + "pr": "https://github.com/cert-manager/cert-manager/pull/2397" + }, + "reactions": 6, + "comments": 34, + "synthesizedBy": "regex", + "qualityScore": 68 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with cert-manager installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:27:51.186Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cert-manager/cert-manager-5084-wip-configurable-context-timeout.json b/solutions/cncf-generated/cert-manager/cert-manager-5084-wip-configurable-context-timeout.json new file mode 100644 index 00000000..97be8342 --- /dev/null +++ b/solutions/cncf-generated/cert-manager/cert-manager-5084-wip-configurable-context-timeout.json @@ -0,0 +1,86 @@ +{ + "version": "kc-mission-v1", + "name": "cert-manager-5084-wip-configurable-context-timeout", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "cert-manager: WIP: configurable context timeout", + "description": "### Pull Request Motivation\n\nThe initial issue is slow responding ZeroSSL ACME api. So most of the time cert-manager is not able to register.\n\n```\ncert-manager/controller/clusterissuers \"msg\"=\"failed to register an ACME account\" \"error\"=\"context deadline exceeded\"\n```\n\nThe root cause is already reported to ZeroSSL support but overall the fixed value context timeout of 10 seconds is not ideal when it come to edge-clusters with not reliable or slow connection.\n\nThis PR should:\n- fix #5080 \n- fix c", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "### Pull Request Motivation\n\nThe initial issue is slow responding ZeroSSL ACME api. 
So most of the time cert-manager is not able to register.\n\n```\ncert-manager/controller/clusterissuers \"msg\"=\"failed " + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\ncert-manager/controller/clusterissuers \"msg\"=\"failed to register an ACME account\" \"error\"=\"context deadline exceeded\"\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/cert-manager/cert-manager/pull/5157. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "> This PR compliments #5084, as this seems to be a stale PR.\n### Pull Request Motivation\nSome ACME providers take a long time to respond, where a hardcoded 10 seconds timeout is insufficient to request certificates, which results in a context deadline exceeded.\n\nThis PR should fix:\n* #5080 \n* cert-manager/website#583\n\n### Kind\nfeature\n\n### Release Note", + "codeSnippets": [ + "cert-manager/controller/clusterissuers \"msg\"=\"failed to register an ACME account\" \"error\"=\"context deadline exceeded\"", + "cert-manager/controller/clusterissuers \"msg\"=\"failed to register an ACME account\" \"error\"=\"context deadline exceeded\"", + "> This PR compliments #5084, as this seems to be a stale PR.\r\n### Pull Request Motivation\r\nSome ACME providers take a long time to respond, where a hardcoded 10 seconds timeout is insufficient to request certificates, which results in a context deadline exceeded.\r\n\r\nThis PR should fix:\r\n* #5080 \r\n* cert-manager/website#583\r\n\r\n### Kind\r\nfeature\r\n\r\n### Release Note\r\n\r\n", + "Hi @fatz. Thanks for your PR.\n\nI'm waiting for a [cert-manager](https://github.com/orgs/cert-manager/people) member to verify that this patch is reasonable to test. If it is, they should reply with `/ok-to-test` on its own line. Until that is done, I will not automatically test new commits in this PR, but the usual testing commands by org members will still work. Regular contributors should [join the org](https://github.com/orgs/cert-manager/people) to skip this step.\n\nOnce the patch is verified, the new status will be reflected by the `ok-to-test` label.\n\nI understand the commands that are listed [here](https://go.k8s.io/bot-commands?repo=cert-manager%2Fcert-manager).\n\n
\n\n/kind feature\r\n/ok-to-test\n@fatz: The following tests **failed**, say `/retest` to rerun all failed tests or `/retest-required` to rerun all mandatory failed tests:\n\nTest name | Commit | Details | Required | Rerun command\n--- | --- | --- | --- | ---\npull-cert-manager-make-e2e-v1-23 | 657c674a60457c1c35ae490d344dbfa417ffd5b7 | [link](https://prow.build-infra.jetstack.net/view/gs/jetstack-logs/pr-logs/pull/cert-manager_cert-manager/5084/pull-cert-manager-make-e2e-v1-23/1522595901862842368) | true | `/test pull-cert-manager-make-e2e-v1-23`\npull-cert-manager-make-test | 657c674a60457c1c35ae490d344dbfa417ffd5b7 | [link](https://prow.build-infra.jetstack.net/view/gs/jetstack-logs/pr-logs/pull/cert-manager_cert-manager/5084/pull-cert-manager-make-test/1522595901619572736) | true | `/test pull-cert-manager-make-test`\npull-cert-manager-bazel | 657c674a60457c1c35ae490d344dbfa417ffd5b7 | [link](https://prow.build-infra.jetstack.net/view/gs/jetstack-logs/pr-logs/pull/cert-manager_cert-manager/5084/pull-cert-manager-bazel/1522595901573435392) | true | `/test pull-cert-manager-bazel`\npull-cert-manager-e2e-v1-23 | 657c674a60457c1c35ae490d344dbfa417ffd5b7 | [link](https://prow.build-infra.jetstack.net/view/gs/jetstack-logs/pr-logs/pull/cert-manager_cert-manager/5084/pull-cert-manager-e2e-v1-23/1522595901799927808) | true | `/test pull-cert-manager-e2e-v1-23`\n\n[Full PR test history](https://prow.build-infra.jetstack.net/pr-history?org=cert-manager&repo=cert-manager&pr=5084). [Your PR dashboard](https://jetstack-build-infra.appspot.com/pr/fatz). Please help us cut down on flakes by [linking to](https://git.k8s.io/community/contributors/devel/sig-testing/flaky-tests.md#github-issues-for-known-flakes) an [open issue](https://github.com/cert-manager/cert-manager/issues?q=is:issue+is:open) when you hit one in your PR.\n\n
\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository. I understand the commands that are listed [here](https://go.k8s.io/bot-commands).\n
\n\n> I also think we could maybe change the name from \"ContextTimeout\" to something like \"IssuerSetupTimeout\" so that it's super specific what the timeout we're configuring does 👍\r\n\r\nYeah sounds good. But I'm not 100% sure if its only this specific timeout\n- Push - \r\nStale since 10 days, sadly don't know how to work on the Repo otherwise I'd contrib. - Issue still persists. \nI've been using this patch for a few days now, but it as is, does not solve entirely the problem (at least in my case, using `zerossl` as the ACME provider).\r\n\r\nI also had to increase the timeout in the `pkg/acme/client/middleware/logger.go`, as I noticed it was failing at the beginning of the registration:", + "With this change, the registration now is taking `~15s` to complete and I have not seen issues anymore:" + ] + } + }, + "metadata": { + "tags": [ + "cert-manager", + "graduated", + "security", + "troubleshoot", + "release-note", + "do-not-merge-work-in-progress", + "kind-feature", + "size-m", + "dco-signoff--yes", + "ok-to-test" + ], + "cncfProjects": [ + "cert-manager" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/cert-manager/cert-manager/pull/5084", + "repo": "https://github.com/cert-manager/cert-manager", + "pr": "https://github.com/cert-manager/cert-manager/pull/5157" + }, + "reactions": 13, + "comments": 10, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with cert-manager installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:27:31.247Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/chaos-mesh/chaos-mesh-3066-helm-chart-support-latest-api-version-of-dashboard-ingress.json b/solutions/cncf-generated/chaos-mesh/chaos-mesh-3066-helm-chart-support-latest-api-version-of-dashboard-ingress.json new file mode 100644 index 00000000..8d2cf221 --- /dev/null +++ b/solutions/cncf-generated/chaos-mesh/chaos-mesh-3066-helm-chart-support-latest-api-version-of-dashboard-ingress.json @@ -0,0 +1,86 @@ +{ + "version": "kc-mission-v1", + "name": "chaos-mesh-3066-helm-chart-support-latest-api-version-of-dashboard-ingress", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "chaos-mesh: Helm chart: Support latest api version of dashboard ingress", + "description": "### What problem does this PR solve?\n\n### What's changed and how it works?\n\n- Added values to the `values.yaml` helm file for reference by `templates/ingress.yaml`.\n - `dashboard.ingress.apiVersionOverrides` field: apiVersion of ingress. This is used in `_helpers.tpl` to define apiVersion of ingress. 
\n - `dashboard.ingress.ingressClassName` field: For define ingress controller \n - `dashboard.ingress.paths` field: moved from `dashboard.ingress.hosts.paths`\n\n- `chaos-dashboard.ingress.apiVersio", + "type": "deploy", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "### What problem does this PR solve?\n\n### What's changed and how it works?\n\n- Added values to the `values.yaml` helm file for reference by `templates/ingress.yaml`.\n - `dashboard.ingress.apiVersionOv" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n### DCO\r\n\r\nIf you find the DCO check fails, please run commands like below (Depends on the actual situations. For example, if the failed commit isn't the most recent) to fix it:\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/chaos-mesh/chaos-mesh/pull/3098. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "### What problem does this PR solve?\n\nIt adds support for ingressClass objects in the Ingress Template for Chaos Mesh Helm chart. \n\n### What's changed and how it works?\n\ningressClassName should be specified in values.yaml instead of kubernetes.io/ingress.class: nginx annotation for Kubernetes >= 1.18\n\n### Related changes\n\n- Need to **cheery-pick to release branches**\n\n### Checklist\n\nTests\n\n- [X] No code\n\nSide effects\n\n### DCO\n\nIf you find the DCO check fails, please run commands like below (Depends on the actual situations. For example, if the failed commit isn't the most recent) to fix it:\n\n```shell\ngit commit --amend --signoff\ngit push --force\n```", + "codeSnippets": [ + "### DCO\r\n\r\nIf you find the DCO check fails, please run commands like below (Depends on the actual situations. For example, if the failed commit isn't the most recent) to fix it:", + "### DCO\r\n\r\nIf you find the DCO check fails, please run commands like below (Depends on the actual situations. For example, if the failed commit isn't the most recent) to fix it:", + "### What problem does this PR solve?\r\n\r\nIt adds support for ingressClass objects in the Ingress Template for Chaos Mesh Helm chart. \r\n\r\n### What's changed and how it works?\r\n\r\ningressClassName should be specified in values.yaml instead of kubernetes.io/ingress.class: nginx annotation for Kubernetes >= 1.18\r\n\r\n### Related changes\r\n\r\n- [ ] Need to update `chaos-mesh/website`\r\n- [ ] Need to update `Dashboard UI`\r\n- Need to **cheery-pick to release branches**\r\n - [ ] release-2.1\r\n - [ ] release-2.0\r\n\r\n### Checklist\r\n\r\nTests\r\n\r\n\r\n\r\n- [ ] Unit test\r\n- [ ] E2E test\r\n- [X] No code\r\n- [ ] Manual test (add steps below)\r\n\r\n\r\n\r\nSide effects\r\n\r\n- [ ] Breaking backward compatibility\r\n\r\n### Release note ", + "### DCO\r\n\r\nIf you find the DCO check fails, please run commands like below (Depends on the actual situations. 
For example, if the failed commit isn't the most recent) to fix it:" + ] + } + }, + "metadata": { + "tags": [ + "chaos-mesh", + "incubating", + "orchestration", + "deploy", + "status-can-merge", + "status-lgt2", + "contribution", + "first-time-contributor", + "size-m" + ], + "cncfProjects": [ + "chaos-mesh" + ], + "targetResourceKinds": [ + "Ingress" + ], + "difficulty": "intermediate", + "issueTypes": [ + "deploy" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/chaos-mesh/chaos-mesh/pull/3066", + "repo": "https://github.com/chaos-mesh/chaos-mesh", + "pr": "https://github.com/chaos-mesh/chaos-mesh/pull/3098" + }, + "reactions": 4, + "comments": 15, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with chaos-mesh installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:36:40.266Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/chaos-mesh/chaos-mesh-3476-chore-configure-qps-and-burst-for-chaos-dashboard.json b/solutions/cncf-generated/chaos-mesh/chaos-mesh-3476-chore-configure-qps-and-burst-for-chaos-dashboard.json new file mode 100644 index 00000000..f202265f --- /dev/null +++ b/solutions/cncf-generated/chaos-mesh/chaos-mesh-3476-chore-configure-qps-and-burst-for-chaos-dashboard.json @@ -0,0 +1,86 @@ +{ + "version": "kc-mission-v1", + "name": "chaos-mesh-3476-chore-configure-qps-and-burst-for-chaos-dashboard", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "chaos-mesh: chore: configure QPS and Burst for chaos dashboard", + "description": "### What problem does this PR solve?\n\n### What's changed and how it works?\n\n- append `QPS` and `Burst` to `ChaosDashboardConfig`\n- when initializing kubernetes client, respect these configurations.\n\n### Related changes\n\n- Need to **cheery-pick to release branches**\n\n### Checklist\n\nCHANGELOG\n\nTests\n\nSide effects\n\n### DCO\n\nIf you find the DCO check fails, please run commands like below (Depends on the actual situations. For example, if the failed commit isn't the most recent) to fix it:\n\n```shell\n", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "### What problem does this PR solve?\n\n### What's changed and how it works?\n\n- append `QPS` and `Burst` to `ChaosDashboardConfig`\n- when initializing kubernetes client, respect these configurations.\n\n#" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\ngit commit --amend --signoff\r\ngit push --force\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/chaos-mesh/chaos-mesh/pull/3479. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "cherry-pick #3476 to release-2.3\nYou can switch your code base to this Pull Request by using [git-extras](https://github.com/tj/git-extras):\n```bash\n# In chaos-mesh repo:\ngit pr https://github.com/chaos-mesh/chaos-mesh/pull/3479\n```\n\nAfter apply modifications, you can push your change to this PR via:\n```bash\ngit push git@github.com:ti-srebot/chaos-mesh.git pr/3479:release-2.3-457723db198c\n```\n\n---\n\n### What problem does this PR solve?\n\n### What's changed and how it works?\n\n- append `QPS` and `Burst` to `ChaosDashboardConfig`\n- when initializing kubernetes client, respect these configurations.\n\n### Related changes\n\n- Need to **cheery-pick to release branches**\n\n### Checklist\n\nCHANGELOG\n\nTests\n\nSide effects\n\n### DCO\n\nIf you find the DCO check fails, please run commands like below (Depends on the actual situations. For example, if the failed commit isn't the most recent) to fix it:\n\n```shell\ngit commit --amend --signoff\ngit push --force\n```", + "codeSnippets": [ + "git commit --amend --signoff\r\ngit push --force", + "git commit --amend --signoff\r\ngit push --force", + "# In chaos-mesh repo:\ngit pr https://github.com/chaos-mesh/chaos-mesh/pull/3479", + "git push git@github.com:ti-srebot/chaos-mesh.git pr/3479:release-2.3-457723db198c", + "git commit --amend --signoff\r\ngit push --force" + ] + } + }, + "metadata": { + "tags": [ + "chaos-mesh", + "incubating", + "orchestration", + "troubleshoot", + "status-can-merge", + "status-lgt2", + "size-m", + "needs-cherry-pick-release-2-1", + "needs-cherry-pick-release-2-2", + "needs-cherry-pick-release-2-3" + ], + "cncfProjects": [ + "chaos-mesh" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/chaos-mesh/chaos-mesh/pull/3476", + "repo": "https://github.com/chaos-mesh/chaos-mesh", + "pr": "https://github.com/chaos-mesh/chaos-mesh/pull/3479" + }, + "reactions": 3, + "comments": 12, + "synthesizedBy": "regex", + "qualityScore": 62 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with chaos-mesh installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:36:50.093Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cilium/cilium-15383-add-wireguard-support.json b/solutions/cncf-generated/cilium/cilium-15383-add-wireguard-support.json new file mode 100644 index 00000000..b856d7be --- /dev/null +++ b/solutions/cncf-generated/cilium/cilium-15383-add-wireguard-support.json @@ -0,0 +1,77 @@ +{ + "version": "kc-mission-v1", + "name": "cilium-15383-add-wireguard-support", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "cilium: Add Wireguard support", + "description": "This PR adds a native Wireguard support. 
Currently, the feature is restricted to Kubernetes and ClusterPool (with single podCIDR per node and IP family) IPAM, and works in the direct routing mode only (the tunneling mode is going to be supported in the future).\n\nThe feature consists of two major components - `pkg/wireguard/agent` (which is run by cilium-agent) and `pkg/wireguard/operator` (run by a cilium-operator leader).\n\nAt the high level, we create a wireguard tunnel device (`cilium_wg0`) on", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This PR adds a native Wireguard support. Currently, the feature is restricted to Kubernetes and ClusterPool (with single podCIDR per node and IP family) IPAM, and works in the direct routing mode only" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n$ ip rule show\r\n1: from all fwmark 0xe00/0xf00 lookup 201\r\n[...]\r\n$ ip route show table 201\r\ndefault dev cilium_wg0\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "> Nice rocket\n> \n> Haven't looked at the full PR in detail, but stumbled on this bit in the description:\n> \n> > The feature consists of two major componens - `pkg/wireguard/agent.go` (which is run by cilium-agent) and `pkg/wireguard/operator.go` (run by a cilium-operator leader).\n> \n> Ideally (and if possible) I'd suggest to split the agent and operator components into separate packages `pkg/wireguard/agent` and `pkg/wireguard/operator`, with only the common parts (if any) residing in `pkg/wireguard`. This will be beneficial in terms of binary size since this avoids unnecessrily pulling in transitive dependencies (e.g. as far as I can see only `pkg/wireguard/agent.go` pulls in `golang.zx2c4.com/wireguard/wgctrl` while `pkg/wireguard/operator.go` doesn't).\n\nThis was good feedback! We addressed it in the latest push. The code is ready to be reviewed in-depth now.", + "codeSnippets": [ + "$ ip rule show\r\n1: from all fwmark 0xe00/0xf00 lookup 201\r\n[...]\r\n$ ip route show table 201\r\ndefault dev cilium_wg0", + "$ ip rule show\r\n1: from all fwmark 0xe00/0xf00 lookup 201\r\n[...]\r\n$ ip route show table 201\r\ndefault dev cilium_wg0" + ] + } + }, + "metadata": { + "tags": [ + "cilium", + "graduated", + "networking", + "troubleshoot", + "release-note-major", + "ready-to-merge" + ], + "cncfProjects": [ + "cilium" + ], + "targetResourceKinds": [ + "Pod", + "Node" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/cilium/cilium/pull/15383", + "repo": "https://github.com/cilium/cilium" + }, + "reactions": 16, + "comments": 21, + "synthesizedBy": "regex", + "qualityScore": 65 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with cilium installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:28:00.108Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cilium/cilium-18463-adding-support-for-aws-eni-prefix-delegation-ipv4-only.json b/solutions/cncf-generated/cilium/cilium-18463-adding-support-for-aws-eni-prefix-delegation-ipv4-only.json new file mode 100644 index 00000000..83384518 --- /dev/null +++ b/solutions/cncf-generated/cilium/cilium-18463-adding-support-for-aws-eni-prefix-delegation-ipv4-only.json @@ -0,0 +1,86 @@ +{ + "version": "kc-mission-v1", + "name": "cilium-18463-adding-support-for-aws-eni-prefix-delegation-ipv4-only", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "cilium: Adding support for AWS ENI prefix delegation - IPv4 Only", + "description": "AWS introduced support for assigning prefixes to EC2 network interfaces - [ prefix delegation (pd) ](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-prefix-eni.html). Some of the benefits of using PD are:\n* Increased pod density on nodes (~ 16x more pods)\n* Reduced reliance on operator for pod IP allocation.\n* Reduced API calls to AWS and faster pod startup time.\n* Reduced cost in Amazon VPC IP Address Manager https://github.com/cilium/cilium/issues/16987#issuecomment-1006225191\n\nWith `a", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "AWS introduced support for assigning prefixes to EC2 network interfaces - [ prefix delegation (pd) ](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-prefix-eni.html). Some of the benefits of u" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n### Failure Output\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/cilium/cilium/pull/18557. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Currently in CI only `cilium-cli` based connectivity tests are run for AWS ENI mode. AFAIK, There seems to be no easy way to write e2e tests exclusive to a cloud provider. This commit introduces a new ginkgo focus group for ENI, which can be used to house e2e tests for cloud provider specific features like AWS excess IP release, ENI prefix delegation, etc. There are more features that aren't currently tested e2e in CI and this focus group should make it easy to add them.\n\nThis could also be achieved with build tags maybe ? Please suggest if there's an easier / better way to achieve this.\n\nThis PR also adds an e2e test for changes added from https://github.com/cilium/cilium/pull/17939 and is needed to trigger the e2e test in https://github.com/cilium/cilium/pull/18463\n\nLink to [successful workflow](https://github.com/DataDog/cilium/runs/5205944242?check_suite_focus=true) run with incoming changes.\n\nTodo :", + "codeSnippets": [ + "### Failure Output", + "### Failure Output", + "\n\nIf it is a flake and a GitHub issue doesn't already exist to track it, comment `/mlh new-flake Cilium-PR-K8s-1.23-kernel-net-next` so I can create one.\n/test\n\nJob 'Cilium-PR-K8s-1.21-kernel-5.4' failed:\n
Click to show.\n\n### Test Name", + "### Failure Output", + "
\n\nIf it is a flake and a GitHub issue doesn't already exist to track it, comment `/mlh new-flake Cilium-PR-K8s-1.21-kernel-5.4` so I can create one.\n\nJob 'Cilium-PR-K8s-1.23-kernel-net-next' failed:\n
Click to show.\n\n### Test Name" + ] + } + }, + "metadata": { + "tags": [ + "cilium", + "graduated", + "networking", + "troubleshoot", + "release-note-major", + "ready-to-merge", + "area-eni" + ], + "cncfProjects": [ + "cilium" + ], + "targetResourceKinds": [ + "Pod", + "Node" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/cilium/cilium/pull/18463", + "repo": "https://github.com/cilium/cilium", + "pr": "https://github.com/cilium/cilium/pull/18557" + }, + "reactions": 13, + "comments": 18, + "synthesizedBy": "regex", + "qualityScore": 62 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with cilium installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:28:07.089Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cilium/cilium-20090-k8s-allow-setting-multiple-k8s-api-server-addresses.json b/solutions/cncf-generated/cilium/cilium-20090-k8s-allow-setting-multiple-k8s-api-server-addresses.json new file mode 100644 index 00000000..062de09f --- /dev/null +++ b/solutions/cncf-generated/cilium/cilium-20090-k8s-allow-setting-multiple-k8s-api-server-addresses.json @@ -0,0 +1,78 @@ +{ + "version": "kc-mission-v1", + "name": "cilium-20090-k8s-allow-setting-multiple-k8s-api-server-addresses", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "cilium: k8s: allow setting multiple k8s API server addresses", + "description": "See commit message for detailed description\n\nIntroduce a new command line parameter(`--k8s-api-server-urls`) and helm option(`k8s.apiServerURLs`) to specify multiple k8s API server addresses for the client to use.\n\nFixes: #19038", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "See commit message for detailed description\n\nIntroduce a new command line parameter(`--k8s-api-server-urls`) and helm option(`k8s.apiServerURLs`) to specify multiple k8s API server addresses for the c" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n@fristonio Welcome back! :sweat_smile: Would it be possible to set the new param from Helm? Currently, we use this hack to pass the API server endpoint addr - https://github.com/cilium/cilium/blob/master/install/kubernetes/cilium/templates/cilium-agent/daemonset.yaml#L193.\nHey @brb 👋 \r\nIts good to be back. 😄 \r\nYeah, I have some changes locally for the helm option that I need to test. Will update the PR soon.\n/test\nHey @qmonnet! 👋 \r\nYeah, changing the CLI flag to use `urls` instead of `address\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Job 'Cilium-PR-K8s-1.24-kernel-4.19' failed:\n
Click to show.\n\n### Test Name\n```test-name\nK8sDatapathConfig Host firewall With VXLAN\n```\n\n### Failure Output\n```failure-output\nFAIL: Failed to reach 192.168.56.11:80 from testclient-p25x5\n```\n\n
\n\nIf it is a flake and a GitHub issue doesn't already exist to track it, comment `/mlh new-flake Cilium-PR-K8s-1.24-kernel-4.19` so I can create one.\n\nJob 'Cilium-PR-K8s-1.23-kernel-net-next' failed:\n
Click to show.\n\n### Test Name\n```test-name\nK8sEgressGatewayTest tunnel disabled with endpointRoutes enabled no egress gw policy connectivity works\n```\n\n### Failure Output\n```failure-output\nFAIL: Expected command: kubectl exec -n kube-system log-gatherer-kvm24 -- curl --path-as-is -s -D /dev/stderr --fail --connect-timeout 5 --max-time 20 http://192.168.56.11:20080 -w \"time-> DNS: '%{time_namelookup}(%{remote_ip})', Connect: '%{time_connect}',Transfer '%{time_starttransfer}', total '%{time_total}'\" \n```\n\n
\n\nIf it is a flake and a GitHub issue doesn't already exist to track it, comment `/mlh new-flake Cilium-PR-K8s-1.23-kernel-net-next` so I can create one.", + "codeSnippets": [ + "@fristonio Welcome back! :sweat_smile: Would it be possible to set the new param from Helm? Currently, we use this hack to pass the API server endpoint addr - https://github.com/cilium/cilium/blob/master/install/kubernetes/cilium/templates/cilium-agent/daemonset.yaml#L193.\nHey @brb 👋 \r\nIts good to be back. 😄 \r\nYeah, I have some changes locally for the helm option that I need to test. Will update the PR soon.\n/test\nHey @qmonnet! 👋 \r\nYeah, changing the CLI flag to use `urls` instead of `address", + "@fristonio Welcome back! :sweat_smile: Would it be possible to set the new param from Helm? Currently, we use this hack to pass the API server endpoint addr - https://github.com/cilium/cilium/blob/master/install/kubernetes/cilium/templates/cilium-agent/daemonset.yaml#L193.\nHey @brb 👋 \r\nIts good to be back. 😄 \r\nYeah, I have some changes locally for the helm option that I need to test. Will update the PR soon.\n/test\nHey @qmonnet! 👋 \r\nYeah, changing the CLI flag to use `urls` instead of `addresses` makes sense to me.\r\nI have updated the PR and addressed the changes you requested. \n/test\n\nJob 'Cilium-PR-K8s-1.24-kernel-4.19' failed:\n
Click to show.\n\n### Test Name", + "### Failure Output", + "
\n\nIf it is a flake and a GitHub issue doesn't already exist to track it, comment `/mlh new-flake Cilium-PR-K8s-1.24-kernel-4.19` so I can create one.\n\nJob 'Cilium-PR-K8s-1.23-kernel-net-next' failed:\n
Click to show.\n\n### Test Name", + "### Failure Output" + ] + } + }, + "metadata": { + "tags": [ + "cilium", + "graduated", + "networking", + "troubleshoot", + "area-daemon", + "release-note-minor", + "stale" + ], + "cncfProjects": [ + "cilium" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/cilium/cilium/pull/20090", + "repo": "https://github.com/cilium/cilium" + }, + "reactions": 12, + "comments": 12, + "synthesizedBy": "regex", + "qualityScore": 62 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with cilium installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:28:11.206Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cilium/cilium-37601-kpr-support-kube-apiserver-ha.json b/solutions/cncf-generated/cilium/cilium-37601-kpr-support-kube-apiserver-ha.json new file mode 100644 index 00000000..97daee40 --- /dev/null +++ b/solutions/cncf-generated/cilium/cilium-37601-kpr-support-kube-apiserver-ha.json @@ -0,0 +1,78 @@ +{ + "version": "kc-mission-v1", + "name": "cilium-37601-kpr-support-kube-apiserver-ha", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "cilium: kpr: Support kube-apiserver HA", + "description": "Cilium agent requires a connection to the kube-apiserver control plane to program the BPF datapath without depending on kube-proxy load-balancing when kube-proxy replacement is enabled. To achieve this, Cilium uses `API_SERVER_IP` and `API_SERVER_PORT` configurations for direct connection to the kube-apiserver. However, this approach doesn't support production environments that require multiple kube-apiservers for high availability. Additionally, it cannot rely on a fixed set of addresses provid", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Cilium agent requires a connection to the kube-apiserver control plane to program the BPF datapath without depending on kube-proxy load-balancing when kube-proxy replacement is enabled. To achieve thi" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n/test\n/test\n/test\n/test\n/test\n/test\nci-clustermesh is failing with known breakages. Rebasing the PR to pick up the upstream fixes.\r\n\r\nOther failures - \r\n\r\nhttps://github.com/cilium/cilium/issues/36902\r\nhttps://github.com/cilium/cilium/issues/37763\n/test\n/test\n/test\n/test\n/test\n> Docs good. A paragraph under https://docs.cilium.io/en/latest/network/kubernetes/kubeproxy-free/ would be nice as a follow-up too.\r\n\r\nYes, the KPR documentation does warrant a paragraph. I'll push a commit once in-flight\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "> Have we considered that client-side load balancing for this feature might not be the right approach?\n> \n> Kubernetes pods (including Cilium Agent) have the `KUBERNETES_SERVICE_HOST` and `KUBERNETES_SERVICE_PORT` environment variables, which allow for server-side load balancing of the Kubernetes API server.\n> \n> This load balancing is then typically the responsibility of the Kubernetes control plane provider.\n> \n> Example:\n> \n> ```\n> ~ $ k exec -ti -n kube-system cilium-7eplp -c cilium-agent -- bash\n> root@ip-10-4-6-110:/home/cilium# echo \"$KUBERNETES_SERVICE_HOST\"\n> A93391BB28B5A90F2585A3E36A26C11F.sk1.us-east-2.eks.amazonaws.com\n> root@ip-10-4-6-110:/home/cilium# echo \"$KUBERNETES_SERVICE_PORT\"\n> 443\n> ```\n\nSee the details in this commit - https://github.com/cilium/cilium/pull/37601/commits/bb50c3673f1e08aa2d247c72348e34e3ba7b8066.", + "codeSnippets": [ + "/test\n/test\n/test\n/test\n/test\n/test\nci-clustermesh is failing with known breakages. Rebasing the PR to pick up the upstream fixes.\r\n\r\nOther failures - \r\n\r\nhttps://github.com/cilium/cilium/issues/36902\r\nhttps://github.com/cilium/cilium/issues/37763\n/test\n/test\n/test\n/test\n/test\n> Docs good. A paragraph under https://docs.cilium.io/en/latest/network/kubernetes/kubeproxy-free/ would be nice as a follow-up too.\r\n\r\nYes, the KPR documentation does warrant a paragraph. I'll push a commit once in-flight", + "/test\n/test\n/test\n/test\n/test\n/test\nci-clustermesh is failing with known breakages. Rebasing the PR to pick up the upstream fixes.\r\n\r\nOther failures - \r\n\r\nhttps://github.com/cilium/cilium/issues/36902\r\nhttps://github.com/cilium/cilium/issues/37763\n/test\n/test\n/test\n/test\n/test\n> Docs good. A paragraph under https://docs.cilium.io/en/latest/network/kubernetes/kubeproxy-free/ would be nice as a follow-up too.\r\n\r\nYes, the KPR documentation does warrant a paragraph. I'll push a commit once in-flight reviews are in.\n/test\n@lambdanis @dylandreimerink Thanks for the reviews -- addressed your comments. PTAL!\n/test\nSo, an interesting thing about how how the `default/kubernetes` service works: it's Special. It doesn't use a label selector like other services. Rather, each apiserver manually adds its own IP to the Endpoints when starting, and removes itself when going down.\r\n\r\nAn interesting problem is that Azure Kubernetes Service only ever has zero-or-one IP in the Endpoints. When the apiserver is being failed over, the old one first goes down, briefly causing 0 Endpoints to back the service. Then a new one is created and life goes on.\r\n\r\nFor clients connecting to the domain name this works fine. Likewise, for clients connecting to the service IP, this also works. However, does this code handle that case? It's very hard to tell.\r\n\r\nI would ask you to write some docblocks for the resolving code; I had a very hard time understanding what the functions do. Given that any potential issues are likely to be very high severity, it would be nice to have this well-documented.\n> An interesting problem is that Azure Kubernetes Service only ever has zero-or-one IP in the Endpoints. When the apiserver is being failed over, the old one first goes down, briefly causing 0 Endpoints to back the service. Then a new one is created and life goes on.\r\n> \r\n> For clients connecting to the domain name this works fine. Likewise, for clients connecting to the service IP, this also works. However, does this code handle that case? 
It's very hard to tell.\r\n> \r\n\r\nThe agent does switch to the apiservice address, but I don't know about the internal details of the AKS case.\r\nCan you elaborate how clients connect to the service VIP when there are 0 endpoints? \r\n\r\n> I would ask you to write some docblocks for the resolving code; I had a very hard time understanding what the functions do. Given that any potential issues are likely to be very high severity, it would be nice to have this well-documented.\r\n\r\nAgreed, there are in-line comments throughout the code that handles the HA functionality. But I'll also add a high level overview, and some more comments. \n> Have we considered that client-side load balancing for this feature might not be the right approach?\r\n> \r\n> Kubernetes pods (including Cilium Agent) have the `KUBERNETES_SERVICE_HOST` and `KUBERNETES_SERVICE_PORT` environment variables, which allow for server-side load balancing of the Kubernetes API server.\r\n> \r\n> This load balancing is then typically the responsibility of the Kubernetes control plane provider.\r\n> \r\n> Example:\r\n> \r\n>" + ] + } + }, + "metadata": { + "tags": [ + "cilium", + "graduated", + "networking", + "troubleshoot", + "kind-feature", + "release-note-major", + "area-loadbalancing" + ], + "cncfProjects": [ + "cilium" + ], + "targetResourceKinds": [ + "Service", + "Node" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/cilium/cilium/pull/37601", + "repo": "https://github.com/cilium/cilium" + }, + "reactions": 24, + "comments": 25, + "synthesizedBy": "regex", + "qualityScore": 60 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with cilium installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:27:58.786Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cilium/cilium-41406-policy-return-icmp-destination-unreachable-on-ipv4-egress-policy-de.json b/solutions/cncf-generated/cilium/cilium-41406-policy-return-icmp-destination-unreachable-on-ipv4-egress-policy-de.json new file mode 100644 index 00000000..97af4e36 --- /dev/null +++ b/solutions/cncf-generated/cilium/cilium-41406-policy-return-icmp-destination-unreachable-on-ipv4-egress-policy-de.json @@ -0,0 +1,81 @@ +{ + "version": "kc-mission-v1", + "name": "cilium-41406-policy-return-icmp-destination-unreachable-on-ipv4-egress-policy-de", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "cilium: policy: return ICMP \"Destination unreachable\" on ipv4 egress policy denials", + "description": "This PR adds a new flag `--policy-deny-response` to control the way we reject packets when they are getting a policy denial. There are two options:\n- `none` - this is the behavior we have today, packets are dropped silently. 
This can cause clients to timeout indefinitely (see more context in https://github.com/cilium/cilium/issues/17944)\n- `icmp` - **new feature**: this sends an ICMP \"Destination Unreachable / Packet Filtered\" response to the client so that they can kill the connection much quic", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This PR adds a new flag `--policy-deny-response` to control the way we reject packets when they are getting a policy denial. There are two options:\n- `none` - this is the behavior we have today, packe" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n# curl --connect-timeout 5 google.com\r\ncurl: (28) Failed to connect to google.com port 80: Connection timed out\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "> * Before I continue any further with tests, docs, Helm values, etc. I'd like to get some feedback on whether this approach generally makes sense.\n\nThe approach looks good to me :)\n\nShould we return icmp-port-unreachable for L4 drops?\n\n> * If the approach makes sense, I'm also curious to know what would be the best way to test this behavior. Should I add something in `bpf/tests` or e2e tests or both?\n\nIMO, `bpf/tests` is enough to validate this.\n\n> FWIW I can see that [there's rate-limiting](https://github.com/cilium/cilium/blob/86dd1d02444f4d5b6c826af234f7f37432563398/bpf/lib/lb.h#L1997) only for the ICMPv6 version of the `SERVICE_NO_BACKEND_RESPONSE` feature but not for ICMPv4\n\nThat doesn't look expected to me. We might need to discuss/track it into a dedicated issue. cc @dylandreimerink", + "codeSnippets": [ + "# curl --connect-timeout 5 google.com\r\ncurl: (28) Failed to connect to google.com port 80: Connection timed out", + "# curl --connect-timeout 5 google.com\r\ncurl: (28) Failed to connect to google.com port 80: Connection timed out", + "# curl google.com\r\ncurl: (7) Failed to connect to google.com port 80: No route to host", + "# ping 142.250.75.238\r\nPING 142.250.75.238 (142.250.75.238) 56(84) bytes of data.\r\nFrom 142.250.75.238 icmp_seq=1 Packet filtered\r\nFrom 142.250.75.238 icmp_seq=2 Packet filtered\r\n[...]" + ] + } + }, + "metadata": { + "tags": [ + "cilium", + "graduated", + "networking", + "troubleshoot", + "area-datapath", + "release-note-major", + "sig-policy" + ], + "cncfProjects": [ + "cilium" + ], + "targetResourceKinds": [ + "Pod", + "Service", + "Ingress" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/cilium/cilium/pull/41406", + "repo": "https://github.com/cilium/cilium" + }, + "reactions": 9, + "comments": 7, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with cilium installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:28:19.718Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/containerd/containerd-10177-multipart-layer-fetch.json b/solutions/cncf-generated/containerd/containerd-10177-multipart-layer-fetch.json new file mode 100644 index 00000000..45dea010 --- /dev/null +++ b/solutions/cncf-generated/containerd/containerd-10177-multipart-layer-fetch.json @@ -0,0 +1,80 @@ +{ + "version": "kc-mission-v1", + "name": "containerd-10177-multipart-layer-fetch", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "containerd: Multipart layer fetch", + "description": "TLDR: this makes pulls of big images ~2x faster (edit: and a bit more in the latest iteration), and closes #9922. \n\ncc: #8160, #4989\n___ \n\nHello Containerd People, I have this draft PR I would like to get your eyes on.\n\nIt basically makes pulls faster, but also tries to have not such a big memory impact, by getting consecutive chunks of the layers and immediately pushing them in the pipe (that writes to a file + that signature checksum thing).\nI noticed it made pulls ~2x faster, when using the c", + "type": "analyze", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "TLDR: this makes pulls of big images ~2x faster (edit: and a bit more in the latest iteration), and closes #9922. \n\ncc: #8160, #4989\n___ \n\nHello Containerd People, I have this draft PR I would like to" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\ndst agv_time count(*)\r\n----- ---------------- --------\r\ntmpfs 44.0761538461539 13 \r\n\r\ndst c_para chunk_size_b ctd_max_con agv_time count(*)\r\n----- ------ ------------ ----------- -------- --------\r\ntmpfs 110 32 3 22.625 4 \r\ntmpfs 100 32 3 22.64 5 \r\ntmpfs 130 32 2 22.76 1 \r\ntmpfs 120 32 4 22.824 5 \r\ntmpfs 110 32 \n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Thanks @azr, the numbers on this look good. @swagatbora90 super helpful stats.\n\nWe should continue to respect `max_concurrent_downloads` as the upper limit for actively downloading connections. Can we make the parallelism dynamic, maybe use [TryAcquire](https://pkg.go.dev/golang.org/x/sync/semaphore#Weighted.TryAcquire) until we hit the limit to determine the parallelism. It definitely makes sense to use the concurrency allowance to download the first layers faster rather than spreading across all layers, since unpack still takes up a significant amount of the pull time.\n\nFor configuration, let's use the transfer service configuration. 
This won't make it in for 2.0 and CRI will be switching to transfer service by 2.1.", + "codeSnippets": [ + "dst agv_time count(*)\r\n----- ---------------- --------\r\ntmpfs 44.0761538461539 13 \r\n\r\ndst c_para chunk_size_b ctd_max_con agv_time count(*)\r\n----- ------ ------------ ----------- -------- --------\r\ntmpfs 110 32 3 22.625 4 \r\ntmpfs 100 32 3 22.64 5 \r\ntmpfs 130 32 2 22.76 1 \r\ntmpfs 120 32 4 22.824 5 \r\ntmpfs 110 32", + "dst agv_time count(*)\r\n----- ---------------- --------\r\ntmpfs 44.0761538461539 13 \r\n\r\ndst c_para chunk_size_b ctd_max_con agv_time count(*)\r\n----- ------ ------------ ----------- -------- --------\r\ntmpfs 110 32 3 22.625 4 \r\ntmpfs 100 32 3 22.64 5 \r\ntmpfs 130 32 2 22.76 1 \r\ntmpfs 120 32 4 22.824 5 \r\ntmpfs 110 32 2 22.85 1 \r\ntmpfs 80 32 4 22.99 1 \r\ntmpfs 110 32 4 23.018 5 \r\ntmpfs 90 64 4 23.09 1 \r\ntmpfs 90 32 3 23.18 1 \r\ntmpfs 110 64 3 23.2125 4 \r\ntmpfs 80 64 3 23.29 1 \r\ntmpfs 90 64 3 23.32 1 \r\ntmpfs 100 32 4 23.352 5 \r\ntmpfs 70 15 4 23.4 1 \r\ntmpfs 100 64 3 23.65 5 \r\ntmpfs 120 15 3 23.68 1 \r\ntmpfs 110 64 2 23.74 1 \r\ntmpfs 100 64 4 23.77 5 \r\ntmpfs 70 32 4 23.81 5 \r\ntmpfs 120 32 3 23.83 5\r\n[...]", + "dst agv_time count(*)\r\n---------- ---------------- --------\r\nadded-nvme 47.4008333333333 12 \r\n\r\ndst c_para chunk_size_mb ctd_max_con agv_time count(*)\r\n---------- ------ ------------ ----------- -------- --------\r\nadded-nvme 130 32 3 25.24 1 \r\nadded-nvme 70 32 4 26.1 1 \r\nadded-nvme 80 32 3 26.31 1 \r\nadded-nvme 100 32 3 26.38 1 \r\nadded-nvme 120 32 4 26.58 1 \r\nadded-nvme 130 32 2 26.71 1 \r\nadded-nvme 80 32 4 26.73 1 \r\nadded-nvme 120 10 3 26.82 1 \r\nadded-nvme 80 64 3 26.93 1", + "Total Image size: 8.6 GB -rw-r--r-- 1000:1000 205 B │ │ │ └── README\r\nPotential wasted space: 34 MB drwxr-xr-x 1000:1000 319 B │ │ ├── Xresources\r\nImage efficiency score: 99 %", + "Total Image size: 27 GB\r\nPotential wasted space: 147 MB\r\nImage efficiency score: 99 %" + ] + } + }, + "metadata": { + "tags": [ + "containerd", + "graduated", + "runtime", + "analyze", + "impact-changelog", + "ok-to-test", + "kind-performance", + "size-xl", + "area-distribution" + ], + "cncfProjects": [ + "containerd" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "issueTypes": [ + "analyze" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/containerd/containerd/pull/10177", + "repo": "https://github.com/containerd/containerd" + }, + "reactions": 25, + "comments": 40, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with containerd installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:28:45.248Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/containerd/containerd-3085-shim-pluggable-logging.json b/solutions/cncf-generated/containerd/containerd-3085-shim-pluggable-logging.json new file mode 100644 index 00000000..2a73b838 --- /dev/null +++ b/solutions/cncf-generated/containerd/containerd-3085-shim-pluggable-logging.json @@ -0,0 +1,80 @@ +{ + "version": "kc-mission-v1", + "name": "containerd-3085-shim-pluggable-logging", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "containerd: Shim pluggable logging", + "description": "This adds logging facilities at the shim level to provide minimal I/O\noverhead and pluggable logging options. Log handling is done within the\nshim so that all I/O, cpu, and memory can be charged to the container.\n\nA sample logging driver setting up logging for a container the systemd\njournal looks like this:\n\n```go\npackage main\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\n\t\"github.com/containerd/containerd/runtime/v2/logging\"\n\t\"github.com/coreos/go-systemd/journal\"\n)\n\nfunc main() {\n\tlogg", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This adds logging facilities at the shim level to provide minimal I/O\noverhead and pluggable logging options. Log handling is done within the\nshim so that all I/O, cpu, and memory can be charged to t" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nA `logging` package has been created to assist log developers create\r\nlogging plugins for containerd.\r\n\r\nThis uses a URI based approach for logging drivers that can be expanded\r\nin the future.\r\n\r\nSupported URI scheme's are:\r\n\r\n* binary\r\n* fifo\r\n* file\r\n\r\nYou can pass the log url via ctr on the command line:\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/containerd/containerd/pull/3154. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "Backport of for the release/1.2 branch\n\nfirst commit didn't apply clean due to https://github.com/containerd/containerd/pull/3085 not being in the 1.2 branch", + "codeSnippets": [ + "A `logging` package has been created to assist log developers create\r\nlogging plugins for containerd.\r\n\r\nThis uses a URI based approach for logging drivers that can be expanded\r\nin the future.\r\n\r\nSupported URI scheme's are:\r\n\r\n* binary\r\n* fifo\r\n* file\r\n\r\nYou can pass the log url via ctr on the command line:", + "A `logging` package has been created to assist log developers create\r\nlogging plugins for containerd.\r\n\r\nThis uses a URI based approach for logging drivers that can be expanded\r\nin the future.\r\n\r\nSupported URI scheme's are:\r\n\r\n* binary\r\n* fifo\r\n* file\r\n\r\nYou can pass the log url via ctr on the command line:", + "The following client side Opts are added:" + ] + } + }, + "metadata": { + "tags": [ + "containerd", + "graduated", + "runtime", + "troubleshoot" + ], + "cncfProjects": [ + "containerd" + ], + "targetResourceKinds": [ + "Namespace" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/containerd/containerd/pull/3085", + "repo": "https://github.com/containerd/containerd", + "pr": "https://github.com/containerd/containerd/pull/3154" + }, + "reactions": 6, + "comments": 11, + "synthesizedBy": "regex", + "qualityScore": 62 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with containerd installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:28:51.416Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/containerd/containerd-6702-cri-improve-image-pulling-performance.json b/solutions/cncf-generated/containerd/containerd-6702-cri-improve-image-pulling-performance.json new file mode 100644 index 00000000..96582845 --- /dev/null +++ b/solutions/cncf-generated/containerd/containerd-6702-cri-improve-image-pulling-performance.json @@ -0,0 +1,92 @@ +{ + "version": "kc-mission-v1", + "name": "containerd-6702-cri-improve-image-pulling-performance", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "containerd: CRI: improve image pulling performance", + "description": "### Background:\n\nWith current design, the content backend uses key-lock for long-lived\nwrite transaction. If the content reference has been marked for write\ntransaction, the other requestes on the same reference will fail fast with\nunavailable error. Since the metadata plugin is based on boltbd which\nonly supports single-writer, the content backend can't block or handle\nthe request too long. It requires the client to handle retry by itself,\nlike OpenWriter - backoff retry helper. But the maximum", + "type": "analyze", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "### Background:\n\nWith current design, the content backend uses key-lock for long-lived\nwrite transaction. 
If the content reference has been marked for write\ntransaction, the other requestes on the sam" + }, + { + "title": "The first active snapshot it's waiting on gets removed via `Remove` (so it wa...", + "description": "The first active snapshot it's waiting on gets removed via `Remove` (so it was never committed). For this case there" + }, + { + "title": "First active s", + "description": "First active s" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nBoth content backoff retry and unnecessary unpack impacts the performance.\r\n\r\n### Solution:\r\n\r\nIntroduced the duplicate suppression in fetch and unpack context. The\r\ndeplicate suppression uses key-mutex and single-waiter-notify to support\r\nsingleflight. The caller can use the duplicate suppression in different\r\nPullImage handlers so that we can avoid unnecessary unpack and spin-lock\r\nin OpenWriter.\r\n\r\n### Test Result:\r\n\r\n#### Before enhancement:\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/containerd/containerd/pull/6318. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "With the way things work right now, there's nothing stopping a parallel unpack of the exact\nsame layer to a snapshot. The first one to get committed will live on while the other(s) get garbage collected\nso in the end things work out, but regardless of this it's wasted work. The real issue is that while unpack\nshould be pretty cheap on Linux, the opposite is true for the Windows and lcow formats. Kicking off\n10 parallel pulls of the same image brings my 6 core machine to a halt and pushes 100% cpu utilization.\nWhat all of this ends up causing is exponentially slower parallel pull times for images that either share layers,\nor just pulling the same image.\n\nI'm not sure if this is a \"sound\" way to approach this, or if there's possibly a much easier way to go about this change. I tried\nto model it in a way that wouldn't disrupt things from a clients perspective, so the logic lives in the metadata snapshotter\nlayer. The gist of this change is if a new RemoteContext option is specified, the snapshotter now keeps track of what active\nsnapshots are \"in progress\". Any other snapshots that call Prepare with the same key as a snapshot that is already in progress\nwill now simply wait for one of two things to occur.\n1. The first active snapshot it's waiting on gets removed via `Remove` (so it was never committed). For this case there\nwas likely an error during setup for the first snapshot/unpack, so any waiters continue as normal for this branch and create a new snapshot.\n2. First active s", + "codeSnippets": [ + "Both content backoff retry and unnecessary unpack impacts the performance.\r\n\r\n### Solution:\r\n\r\nIntroduced the duplicate suppression in fetch and unpack context. The\r\ndeplicate suppression uses key-mutex and single-waiter-notify to support\r\nsingleflight. 
The caller can use the duplicate suppression in different\r\nPullImage handlers so that we can avoid unnecessary unpack and spin-lock\r\nin OpenWriter.\r\n\r\n### Test Result:\r\n\r\n#### Before enhancement:", + "Both content backoff retry and unnecessary unpack impacts the performance.\r\n\r\n### Solution:\r\n\r\nIntroduced the duplicate suppression in fetch and unpack context. The\r\ndeplicate suppression uses key-mutex and single-waiter-notify to support\r\nsingleflight. The caller can use the duplicate suppression in different\r\nPullImage handlers so that we can avoid unnecessary unpack and spin-lock\r\nin OpenWriter.\r\n\r\n### Test Result:\r\n\r\n#### Before enhancement:", + "#### With this enhancement:", + "### Test Script:\r\n\r\nlocalhost:5000/{redis|golang}:latest is equal to\r\ndocker.io/library/{redis|golang}:latest. The image is hold in local registry\r\nservice by `docker run -d -p 5000:5000 --name registry registry:2`." + ] + } + }, + "metadata": { + "tags": [ + "containerd", + "graduated", + "runtime", + "analyze", + "area-cri", + "kind-performance" + ], + "cncfProjects": [ + "containerd" + ], + "targetResourceKinds": [ + "Service", + "Job" + ], + "difficulty": "intermediate", + "issueTypes": [ + "analyze" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/containerd/containerd/pull/6702", + "repo": "https://github.com/containerd/containerd", + "pr": "https://github.com/containerd/containerd/pull/6318" + }, + "reactions": 18, + "comments": 24, + "synthesizedBy": "regex", + "qualityScore": 68 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with containerd installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:28:47.415Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/containerd/containerd-8287-add-support-for-userns-in-stateless-and-stateful-pods-with-idmap.json b/solutions/cncf-generated/containerd/containerd-8287-add-support-for-userns-in-stateless-and-stateful-pods-with-idmap.json new file mode 100644 index 00000000..26dd05e8 --- /dev/null +++ b/solutions/cncf-generated/containerd/containerd-8287-add-support-for-userns-in-stateless-and-stateful-pods-with-idmap.json @@ -0,0 +1,86 @@ +{ + "version": "kc-mission-v1", + "name": "containerd-8287-add-support-for-userns-in-stateless-and-stateful-pods-with-idmap", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "containerd: Add support for userns in stateless and stateful pods with idmap mounts (KEP-127, k8s >= 1.27)", + "description": "This adds support in containerd for k8s stateless and stateful pods with user namespaces as implemented in k8s >= 1.27. Kubernetes 1.28 added stateful pod support, but no other changes are needed in containerd, we just use idmap mounts for all volumes (stateless, like configmaps, or stateful, like hostPath volumes).\n\nWe have some requirements:\n * The filesystems should support idmap mounts. The most late adition was tmpfs, that we merged support in Linux 6.3 for idmap mounts, so in practice you ", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This adds support in containerd for k8s stateless and stateful pods with user namespaces as implemented in k8s >= 1.27. 
Kubernetes 1.28 added stateful pod support, but no other changes are needed in c" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n$ sudo bin/ctr --address /run/containerd-rata/containerd.sock container create --config tmp/config-userns-idmap.json rata-test\r\n$ sudo bin/ctr --address /run/containerd-rata/containerd.sock t start rata-test\r\nctr: failed to create shim task: failed to detect OCI runtime features: OCI runtime doesn't support idmap mounts: missing `mountExtensions.idmap` entry in `features` command: unknown\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/containerd/containerd/pull/5890. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "\n\n

Hi,
this is this is initial support of idmapped mount points in containerd. The original PR was published by @mauriciovasquezbernal here =1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with containerd installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:29:13.428Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/continuous-optimization/continuous-optimization-10-added-node-chaos-scenarios.json b/solutions/cncf-generated/continuous-optimization/continuous-optimization-10-added-node-chaos-scenarios.json new file mode 100644 index 00000000..29e0adb3 --- /dev/null +++ b/solutions/cncf-generated/continuous-optimization/continuous-optimization-10-added-node-chaos-scenarios.json @@ -0,0 +1,80 @@ +{ + "version": "kc-mission-v1", + "name": "continuous-optimization-10-added-node-chaos-scenarios", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "continuous-optimization: Added node chaos scenarios", + "description": "This commit:\n- Adds a node scenario to stop and start an instance\n- Adds a node scenario to terminate an instance\n- Adds a node scenario to reboot an instance\n- Adds a node scenario to stop the kubelet\n- Adds a node scenario to crash the node\n\nFixes: #8", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This commit:\n- Adds a node scenario to stop and start an instance\n- Adds a node scenario to terminate an instance\n- Adds a node scenario to reboot an instance\n- Adds a node scenario to stop the kubele" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nTry invoking oc debug node/ip-10-0-203-2.us-east-2.compute.internal -- chroot /host dd if=/dev/urandom of=/proc/sysrq-trigger\r\n2020-06-23 11:01:31,957 [INFO] Scenario: {'node_scenarios': [{'name': 'Fork bomb the node', 'actions': ['node_crash'], 'label_selector': 'node-role.kubernetes.io/worker', 'instance_kill_count': 1, 'timeout': 20, 'cloud_type': 'aws'}]} has been successfully injected!\r\n2020-06-23 11:01:31,957 [INFO] Waiting for the specified duration: 60\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/krkn-chaos/krkn/pull/18. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "This covers the stop kubelet and node crash scenarios Mike mentioned in issue: https://github.com/openshift-scale/kraken/issues/8\n\nI based a lot of my more general set up of the node kill scenario yaml file from the below and this could be combined at some point https://github.com/openshift-scale/kraken/pull/10/files \n\nThese 2 scenarios are not cloud specific. \nThis is a first pass of stopping kubelet. 
I was hoping to add the ability to stop, wait, and then restart of kubelet but I was not able to get it to work properly.\nI also had to find a separate command then the one Mike mentioned for the fork bomb scenario.", + "codeSnippets": [ + "Try invoking oc debug node/ip-10-0-203-2.us-east-2.compute.internal -- chroot /host dd if=/dev/urandom of=/proc/sysrq-trigger\r\n2020-06-23 11:01:31,957 [INFO] Scenario: {'node_scenarios': [{'name': 'Fork bomb the node', 'actions': ['node_crash'], 'label_selector': 'node-role.kubernetes.io/worker', 'instance_kill_count': 1, 'timeout': 20, 'cloud_type': 'aws'}]} has been successfully injected!\r\n2020-06-23 11:01:31,957 [INFO] Waiting for the specified duration: 60", + "Try invoking oc debug node/ip-10-0-203-2.us-east-2.compute.internal -- chroot /host dd if=/dev/urandom of=/proc/sysrq-trigger\r\n2020-06-23 11:01:31,957 [INFO] Scenario: {'node_scenarios': [{'name': 'Fork bomb the node', 'actions': ['node_crash'], 'label_selector': 'node-role.kubernetes.io/worker', 'instance_kill_count': 1, 'timeout': 20, 'cloud_type': 'aws'}]} has been successfully injected!\r\n2020-06-23 11:01:31,957 [INFO] Waiting for the specified duration: 60", + "(venv3) prubenda@prubenda-mac kraken % oc get nodes\r\nNAME STATUS ROLES AGE VERSION\r\nip-10-0-132-58.us-east-2.compute.internal Ready worker 125m v1.18.3+91d0edd\r\nip-10-0-138-72.us-east-2.compute.internal Ready master 136m v1.18.3+91d0edd\r\nip-10-0-176-148.us-east-2.compute.internal Ready master 136m v1.18.3+91d0edd\r\nip-10-0-183-154.us-east-2.compute.internal Ready worker 126m v1.18.3+91d0edd\r\nip-10-0-203-2.us-east-2.compute.internal NotReady worker 126m v1.18.3+91d0edd\r\nip-10-0-210-102.us-east-2.compute.internal Ready master 135m v1.18.3+91d0edd" + ] + } + }, + "metadata": { + "tags": [ + "continuous-optimization", + "sandbox", + "orchestration", + "troubleshoot" + ], + "cncfProjects": [ + "continuous-optimization" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/krkn-chaos/krkn/pull/10", + "repo": "https://github.com/krkn-chaos/krkn", + "pr": "https://github.com/krkn-chaos/krkn/pull/18" + }, + "reactions": 1, + "comments": 19, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with continuous-optimization installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:39:29.597Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/contour/contour-5672-wait-for-cache-sync-and-dag-build-before-starting-xds-server.json b/solutions/cncf-generated/contour/contour-5672-wait-for-cache-sync-and-dag-build-before-starting-xds-server.json new file mode 100644 index 00000000..bd5d5db1 --- /dev/null +++ b/solutions/cncf-generated/contour/contour-5672-wait-for-cache-sync-and-dag-build-before-starting-xds-server.json @@ -0,0 +1,76 @@ +{ + "version": "kc-mission-v1", + "name": "contour-5672-wait-for-cache-sync-and-dag-build-before-starting-xds-server", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "contour: wait for cache sync and DAG build before starting xDS server", + "description": "I've removed the `x.mgr.GetCache().WaitForCacheSync()` call as it's implicitly handled in the `mgr.Start()` flow.\n\nAs a TLDR, the PR prevents starting the XDS server and building the DAG until the cache is synced with the initial list of k8s objects.", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "I've removed the `x.mgr.GetCache().WaitForCacheSync()` call as it's implicitly handled in the `mgr.Start()` flow.\n\nAs a TLDR, the PR prevents starting the XDS server and building the DAG until the cac" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n| [Files](https://app.codecov.io/gh/projectcontour/contour/pull/5672?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectcontour) | Coverage Δ | |\n|---|---|---|\n| [internal/featuretests/v3/featuretests.go](https://app.codecov.io/gh/projectcontour/contour/pull/5672?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectcontour#diff-aW50ZXJuYWwvZmVhdHVyZXRlc3RzL3YzL2ZlYXR1cmV0ZX\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "I have one more observation, but otherwise the change looks good to me :+1:\n\nAssume we have a lot of resources that require status update, let's say 2000 `HTTPProxies` with status marked as `invalid`, and the status now needs to be updated `valid` at once, because the error condition was fixed during Contour was down. \n\nThere seems to be a chance now, that Contour will NOT start XDS server before the statuses have been pushed to Kubernetes. It happens like following:\n\n_Sometimes_ Contour manages to acquire lease before client-go sync has finalised. 
`StatusUpdateHandler` will get started and processing status updates is enabled:\n\n```\ntime=\"2023-08-25T14:16:15+03:00\" level=info msg=\"attempting to acquire leader lease projectcontour/leader-elect...\\n\" caller=\"leaderelection.go:245\" context=kubernetes\ntime=\"2023-08-25T14:16:15+03:00\" level=info msg=\"successfully acquired lease projectcontour/leader-elect\\n\" caller=\"leaderelection.go:255\" context=kubernetes\ntime=\"2023-08-25T14:16:15+03:00\" level=info msg=\"started status update handler\" context=StatusUpdateHandler\ntime=\"2023-08-25T14:16:15+03:00\" level=info msg=\"received a new address for status.loadBalancer\" context=loadBalancerStatusWriter loadbalancer-address=\ntime=\"2023-08-25T14:16:15+03:00\" level=info msg=\"performing delayed update\" context=contourEventHandler last_update=239.3048ms outstanding=3984\n```\n\nIn this case the processing of status updates happens within `rebuildDAG()` before we have set `e.initialDagBuilt = true`. D", + "codeSnippets": [ + "| [Files](https://app.codecov.io/gh/projectcontour/contour/pull/5672?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectcontour) | Coverage Δ | |\n|---|---|---|\n| [internal/featuretests/v3/featuretests.go](https://app.codecov.io/gh/projectcontour/contour/pull/5672?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectcontour#diff-aW50ZXJuYWwvZmVhdHVyZXRlc3RzL3YzL2ZlYXR1cmV0ZX", + "| [Files](https://app.codecov.io/gh/projectcontour/contour/pull/5672?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectcontour) | Coverage Δ | |\n|---|---|---|\n| [internal/featuretests/v3/featuretests.go](https://app.codecov.io/gh/projectcontour/contour/pull/5672?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectcontour#diff-aW50ZXJuYWwvZmVhdHVyZXRlc3RzL3YzL2ZlYXR1cmV0ZXN0cy5nbw==) | `86.60% <100.00%> (-0.05%)` | :arrow_down: |\n| [internal/contour/handler.go](https://app.codecov.io/gh/projectcontour/contour/pull/5672?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectcontour#diff-aW50ZXJuYWwvY29udG91ci9oYW5kbGVyLmdv) | `82.16% <61.29%> (-6.07%)` | :arrow_down: |\n| [cmd/contour/serve.go](https://app.codecov.io/gh/projectcontour/contour/pull/5672?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectcontour#diff-Y21kL2NvbnRvdXIvc2VydmUuZ28=) | `19.82% <0.00%> (-0.31%)` | :arrow_down: |\n\n... and [1 file with indirect coverage changes](https://app.codecov.io/gh/projectcontour/contour/pull/5672/indirect-changes?src=pr&el=tree-more&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectcontour)\n\n

\nThere's actually a better way to implement this. The server should only wait for the first DAG build. The handler itself then would ensure the first DAG build is done after the cache sync.\r\n\r\nI'll push a commit to implement this.\n> Nice work @therealak12!\r\n> \r\n> So, my understanding from this is that `WaitForCacheSync()` might have worked - if we did NOT process the events asynchronously. Since client-go will be unaware of our background processing it returns while we still process our own queue. For this kind of situations, client-go offers utility [`SingleFileTracker`](https://github.com/kubernetes/client-go/blob/e8815ff156658df0fd9284683c5fdcda51a681dc/tools/cache/synctrack/synctrack.go#L80), where user signals their async processing by using `Start()` and `Finished()` call-pair per resource in initial list, and `HasSynced()` which additionally covers the initial queue inside client.go. Is that correct?\r\n> \r\n> I added some small questions inline as well.\r\n> \r\n> The change makes sense to me and seems to work on my machine as well 🙂 👍\r\n> \r\n> Would you add a changelog entry as well?\r\n\r\nThanks for your review and comments. I try to explain what happens exactly:\r\n\r\n- The manager's `WaitForCacheSync` waits until the initial list of Kubernetes objects is delivered to the informers.\r\n- We register our handlers to these informers and ignore the returned `HasSynced` methods.\r\n- Although the full list of initial objects exists in the informers, they're not necessarily handled by the contour handler and thus they don't necessarily exist in the contour's internal cache.\r\n- The DAG rebuild goroutine starts rebuilding DAG and updating Ingress objects *based on its own cache*\r\n\r\nThis PR waits until all of the `HasSynced` methods that are received when registering handlers, return true, and then starts the DAG rebuild process.\r\n\r\nThose `HasSynced` methods would return true when `OnAdd` method of the handler is called for all of the objects in the initial list. If we only rely on these `HasSynced` methods, we may start DAG rebuild process before putting the last object in the cache! (The last object because we use an unbuffered channel and thus OnAdd is blocked until the current object is read by the goroutine.)\r\n\r\nThis is why I've used the `SingleFileTracker`. It's decremented each time the `OnAdd` is called for an object of the initial list and incremented when its handling is done. So if syncTracker.HasSynced returns true, it means we are not processing any objects at that moment.\nI have one more observation, but otherwise the change looks good to me :+1:\r\n\r\nAssume we have a lot of resources that require status update, let's say 2000 `HTTPProxies` with status marked as `invalid`, and the status now needs to be updated `valid` at once, because the error condition was fixed during Contour was down. \r\n\r\nThere seems to be a chance now, that Contour will NOT start XDS server before the statuses have been pushed to Kubernetes. It happens like following:\r\n\r\n_Sometimes_ Contour manages to acquire lease before client-go sync has finalised. `StatusUpdateHandler` will get started and processing status updates is enabled:", + "In this case the processing of status updates happens within `rebuildDAG()` before we have set `e.initialDagBuilt = true`. 
Due to the default client rate limits (adjustable by `--kubernetes-client-qps` and `--kubernetes-client-burst`) the XDS server will be down for quite a while, depending on how many statuses there are to update. \r\n\r\nIn my test it took 7 minutes to update 2000 `HTTPProxies` until I got this:", + "Since the XDS server does not depend on statuses, I think it would make sense to set `initialDagBuilt = True` to start XDS server **before** looping and sending the status updates: https://github.com/projectcontour/contour/blob/68bafab3d1bcc6fd1436b579e11ae357f966d7bd/internal/contour/handler.go#L243-L250\r\n\r\nCc @sunjayBhatia, @skriss \n> I have one more observation, but otherwise the change looks good to me 👍\r\n> \r\n> Assume we have a lot of resources that require status update, let's say 2000 `HTTPProxies` with status marked as `invalid`, and the status now needs to be updated `valid` at once, because the error condition was fixed during Contour was down.\r\n> \r\n> There seems to be a chance now, that Contour will NOT start XDS server before the statuses have been pushed to Kubernetes. It happens like following:\r\n> \r\n> _Sometimes_ Contour manages to acquire lease before client-go sync has finalised. `StatusUpdateHandler` will get started and processing status updates is enabled:\r\n> \r\n>", + "> \r\n> In this case the processing of status updates happens within `rebuildDAG()` before we have set `e.initialDagBuilt = true`. Due to the default client rate limits (adjustable by `--kubernetes-client-qps` and `--kubernetes-client-burst`) the XDS server will be down for quite a while, depending on how many statuses there are to update.\r\n> \r\n> In my test it took 7 minutes to update 2000 `HTTPProxies` until I got this:\r\n> \r\n>" + ] + } + }, + "metadata": { + "tags": [ + "contour", + "incubating", + "networking", + "troubleshoot", + "release-note-minor" + ], + "cncfProjects": [ + "contour" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/projectcontour/contour/pull/5672", + "repo": "https://github.com/projectcontour/contour" + }, + "reactions": 8, + "comments": 12, + "synthesizedBy": "regex", + "qualityScore": 62 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with contour installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:35:15.817Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cri-o/cri-o-2447-add-support-for-dual-stack-ipv6.json b/solutions/cncf-generated/cri-o/cri-o-2447-add-support-for-dual-stack-ipv6.json new file mode 100644 index 00000000..19c9e360 --- /dev/null +++ b/solutions/cncf-generated/cri-o/cri-o-2447-add-support-for-dual-stack-ipv6.json @@ -0,0 +1,98 @@ +{ + "version": "kc-mission-v1", + "name": "cri-o-2447-add-support-for-dual-stack-ipv6", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "cri-o: Add support for dual stack IPv6", + "description": "We have to vendor the latest Kubernetes master as well as OCICNI to get\nthe latest support for dual-stack IPv6. This adds an `AdditionalIps`\nfield to the network status, which will be internally handled as a\nsimple slice of strings. This means that the pod annotations now can\ncontain multiple IPs as well. 
The same applied to the inspect HTTP API.\n\nRelates to: https://github.com/kubernetes/kubernetes/pull/73977\nCurrent status: Under testing since the kubernetes PR has been merged.", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "We have to vendor the latest Kubernetes master as well as OCICNI to get\nthe latest support for dual-stack IPv6. This adds an `AdditionalIps`\nfield to the network status, which will be internally handl" + }, + { + "title": "Only the first two cidrs are used (soft limits for Alpha, might be lifted lat...", + "description": "Only the first two cidrs are used (soft limits for Alpha, might be lifted later on)." + }, + { + "title": "Only the \"RangeAllocator\" (default) is allowed as a value for --cidr-allocato...", + "description": "Only the \"RangeAllocator\" (default) is allowed as a value for --cidr-allocator-type . Cloud allocators are not compatible with ipv6dualstack" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nCC @lachie83 @thockin\r\n\r\n\r\n### What is in the box? (alpha status)\r\n- Node ipam controller now supports multi-cidr per node (node.Spec.PodCIDRs).\r\n- Pod.PodStatus.PodIPs supports multiple IPs.\r\n- Route Controller now supports creating routes `per node's cidr`.\r\n- kubenet now supports dualstack. \r\n- Azure support `Route() interface` implementation for dualstack.\r\n- Known issue: kubenet will always force reporting ipv4, ipv6 (irrespective of podCIDRs order).\r\n- updates to CRI to support reporting m\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kubernetes/kubernetes/pull/73977. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "**What type of PR is this?**\n\n**Implements**: https://github.com/kubernetes/enhancements/pull/808\nIncluded:\n\nCC @lachie83 @thockin\n\n### What is in the box? (alpha status)\n- Node ipam controller now supports multi-cidr per node (node.Spec.PodCIDRs).\n- Pod.PodStatus.PodIPs supports multiple IPs.\n- Route Controller now supports creating routes `per node's cidr`.\n- kubenet now supports dualstack. \n- Azure support `Route() interface` implementation for dualstack.\n- Known issue: kubenet will always force reporting ipv4, ipv6 (irrespective of", + "codeSnippets": [ + "CC @lachie83 @thockin\r\n\r\n\r\n### What is in the box? (alpha status)\r\n- Node ipam controller now supports multi-cidr per node (node.Spec.PodCIDRs).\r\n- Pod.PodStatus.PodIPs supports multiple IPs.\r\n- Route Controller now supports creating routes `per node's cidr`.\r\n- kubenet now supports dualstack. \r\n- Azure support `Route() interface` implementation for dualstack.\r\n- Known issue: kubenet will always force reporting ipv4, ipv6 (irrespective of podCIDRs order).\r\n- updates to CRI to support reporting m", + "CC @lachie83 @thockin\r\n\r\n\r\n### What is in the box? (alpha status)\r\n- Node ipam controller now supports multi-cidr per node (node.Spec.PodCIDRs).\r\n- Pod.PodStatus.PodIPs supports multiple IPs.\r\n- Route Controller now supports creating routes `per node's cidr`.\r\n- kubenet now supports dualstack. 
\r\n- Azure support `Route() interface` implementation for dualstack.\r\n- Known issue: kubenet will always force reporting ipv4, ipv6 (irrespective of podCIDRs order).\r\n- updates to CRI to support reporting multiple IPs per `PodSandBox`.\r\n\r\n### Known Issues\r\n- Cluster ipv6 CIDRS mask bigger than `24` will fail\r\n- ipv6 cidr assignment is using the default ipv4 cidr `/24` (Future: add controls over v6 cidr size)\r\n- kubenet forces `v4,v6` reporting of IPs, users who must`v6,v4` as `--cluster-cidr` \r\n- Masquerading is not done by kubenet. Users will have to use `ip-masq-agent` to perform masquerading correctly for ipv6. A standing PR has been created to support this feature https://github.com/kubernetes-incubator/ip-masq-agent/pull/45 \n[APPROVALNOTIFIER] This PR is **APPROVED**\n\nThis pull-request has been approved by: *saschagrunert*\n\nThe full list of commands accepted by this bot can be found [here](https://go.k8s.io/bot-commands?repo=cri-o%2Fcri-o).\n\nThe pull request process is described [here](https://git.k8s.io/community/contributors/guide/owners.md#the-code-review-process)\n\n
\nNeeds approval from an approver in each of these files:\n\n- ~~[OWNERS](https://github.com/cri-o/cri-o/blob/master/OWNERS)~~ [saschagrunert]\n\nApprovers can indicate their approval by writing `/approve` in a comment\nApprovers can cancel approval by writing `/approve cancel` in a comment\n
\n\n[APPROVALNOTIFIER] This PR is **APPROVED**\n\nThis pull-request has been approved by: *saschagrunert*\n\nThe full list of commands accepted by this bot can be found [here](https://go.k8s.io/bot-commands?repo=cri-o%2Fcri-o).\n\nThe pull request process is described [here](https://git.k8s.io/community/contributors/guide/owners.md#the-code-review-process)\n\n
\nNeeds approval from an approver in each of these files:\n\n- ~~[OWNERS](https://github.com/cri-o/cri-o/blob/master/OWNERS)~~ [saschagrunert]\n\nApprovers can indicate their approval by writing `/approve` in a comment\nApprovers can cancel approval by writing `/approve cancel` in a comment\n
\n\n# [Codecov](https://codecov.io/gh/cri-o/cri-o/pull/2447?src=pr&el=h1) Report\n> Merging [#2447](https://codecov.io/gh/cri-o/cri-o/pull/2447?src=pr&el=desc) into [master](https://codecov.io/gh/cri-o/cri-o/commit/dab780e903a4f6b802f1519a2610b685103df80e?src=pr&el=desc) will **decrease** coverage by `0.38%`.\n> The diff coverage is `15.55%`.", + "No, the (new) `podIPs` array in the pod object still contains only the ipv4 address;", + "I applied the PR with;", + "Here is the log from `kubelet`;" + ] + } + }, + "metadata": { + "tags": [ + "cri-o", + "graduated", + "runtime", + "troubleshoot", + "size-xxl", + "sig-network", + "lgtm", + "approved", + "sig-storage", + "sig-apps", + "sig-cloud-provider", + "dco-signoff--yes" + ], + "cncfProjects": [ + "cri-o" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "advanced", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/cri-o/cri-o/pull/2447", + "repo": "https://github.com/cri-o/cri-o", + "pr": "https://github.com/kubernetes/kubernetes/pull/73977" + }, + "reactions": 1, + "comments": 41, + "synthesizedBy": "regex", + "qualityScore": 70 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with cri-o installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:29:59.034Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/cri-o/cri-o-579-implement-non-terminal-attach.json b/solutions/cncf-generated/cri-o/cri-o-579-implement-non-terminal-attach.json new file mode 100644 index 00000000..46fa1950 --- /dev/null +++ b/solutions/cncf-generated/cri-o/cri-o-579-implement-non-terminal-attach.json @@ -0,0 +1,77 @@ +{ + "version": "kc-mission-v1", + "name": "cri-o-579-implement-non-terminal-attach", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "cri-o: Implement non-terminal attach", + "description": "We use a SOCK_SEQPACKET socket for the attach unix domain socket, which\nmeans the kernel will ensure that the reading side only ever get the\ndata from one write operation. We use this for frameing, where the\nfirst byte is the pipe that the next bytes are for. We have to make sure\nthat all reads from the socket are using at least the same size of buffer\nas the write side, because otherwise the extra data in the message\nwill be dropped.\n\nThis also adds a stdin pipe for the container, similar to th", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "We use a SOCK_SEQPACKET socket for the attach unix domain socket, which\nmeans the kernel will ensure that the reading side only ever get the\ndata from one write operation. 
We use this for frameing, wh" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nSTEP: executing a command with run --rm and attach with stdin\r\nJun 9 18:48:45.944: INFO: Running '/home/amurdaca/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/var/run/kubernetes/admin.kubeconfig --namespace=e2e-tests-kubectl-fd7qc run e2e-test-rm-busybox-job --image=gcr.io/google_containers/busybox:1.24 --rm=true --generator=job/v1 --restart=OnFailure --attach=true --stdin -- sh -c cat && echo 'stdin closed''\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "seems like this isn't fixing the last test, which does:\n```\nSTEP: executing a command with run --rm and attach with stdin\nJun 9 18:48:45.944: INFO: Running '/home/amurdaca/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/var/run/kubernetes/admin.kubeconfig --namespace=e2e-tests-kubectl-fd7qc run e2e-test-rm-busybox-job --image=gcr.io/google_containers/busybox:1.24 --rm=true --generator=job/v1 --restart=OnFailure --attach=true --stdin -- sh -c cat && echo 'stdin closed''\n```\nso we want this command to work:\n```\nkubectl run e2e-test --image=gcr.io/google_containers/busybox:1.24 --rm=true -- generator=job/v1 --restart=OnFailure --attach=true --stdin -- sh -c cat && echo 'stdin closed'\n```", + "codeSnippets": [ + "STEP: executing a command with run --rm and attach with stdin\r\nJun 9 18:48:45.944: INFO: Running '/home/amurdaca/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/var/run/kubernetes/admin.kubeconfig --namespace=e2e-tests-kubectl-fd7qc run e2e-test-rm-busybox-job --image=gcr.io/google_containers/busybox:1.24 --rm=true --generator=job/v1 --restart=OnFailure --attach=true --stdin -- sh -c cat && echo 'stdin closed''", + "STEP: executing a command with run --rm and attach with stdin\r\nJun 9 18:48:45.944: INFO: Running '/home/amurdaca/go/src/k8s.io/kubernetes/_output/bin/kubectl --server=https://localhost:6443 --kubeconfig=/var/run/kubernetes/admin.kubeconfig --namespace=e2e-tests-kubectl-fd7qc run e2e-test-rm-busybox-job --image=gcr.io/google_containers/busybox:1.24 --rm=true --generator=job/v1 --restart=OnFailure --attach=true --stdin -- sh -c cat && echo 'stdin closed''", + "kubectl run e2e-test --image=gcr.io/google_containers/busybox:1.24 --rm=true -- generator=job/v1 --restart=OnFailure --attach=true --stdin -- sh -c cat && echo 'stdin closed'", + " alexlarsson, I am getting a Failed to create container in the logs when I run the command\r\n ./cluster/kubectl.sh run e2e-test --image=gcr.io/google_containers/busybox:1.24 -- generator=job/v1 --restart=Never --attach=true --stdin -- sh -c cat && echo 'stdin closed'", + "./kubectl run e2e-test --image=gcr.io/google_containers/busybox:1.24 -- generator=job/v1 --restart=Never --attach=true --stdin -- sh -c cat && echo 'stdin closed'\r\ndeployment \"e2e-test\" created\r\nstdin closed" + ] + } + }, + "metadata": { + "tags": [ + "cri-o", + "graduated", + "runtime", + "troubleshoot", + "kube-1-8-x", + "cherry-picked" + ], + "cncfProjects": [ + "cri-o" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/cri-o/cri-o/pull/579", + 
"repo": "https://github.com/cri-o/cri-o" + }, + "reactions": 5, + "comments": 37, + "synthesizedBy": "regex", + "qualityScore": 60 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with cri-o installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:29:51.190Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/crossplane/crossplane-2093-implement-multiple-source-field-patch-type.json b/solutions/cncf-generated/crossplane/crossplane-2093-implement-multiple-source-field-patch-type.json new file mode 100644 index 00000000..ef75234b --- /dev/null +++ b/solutions/cncf-generated/crossplane/crossplane-2093-implement-multiple-source-field-patch-type.json @@ -0,0 +1,83 @@ +{ + "version": "kc-mission-v1", + "name": "crossplane-2093-implement-multiple-source-field-patch-type", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "crossplane: Implement multiple-source-field patch type", + "description": "### Description of your changes\nThis PR adds a new patch type, `FromManyCompositeFieldPaths`, which accepts a list of composite field paths.\n\nWhen the patch is applied, these multiple values are retrieved from the composite. If any value cannot be retrieved, the patch is aborted.\n\nThe `Transform` system has been modified to allow Transforms to run on multiple values. Standard transforms will output the same number of values as they are passed.\n\nThe `String` transform will now take any number of ", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "### Description of your changes\nThis PR adds a new patch type, `FromManyCompositeFieldPaths`, which accepts a list of composite field paths.\n\nWhen the patch is applied, these multiple values are retri" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\napiVersion: example.org/v1alpha1 \r\nkind: Test\r\nmetadata:\r\n name: test-one\r\n annotations:\r\n woo: lalala\r\n labels:\r\n label: test\r\nspec:\r\n parameters:\r\n fieldOne: one\r\n fieldTwo: two\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/crossplane/crossplane/pull/2298. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "### Description of your changes\n\nThis implements a `FromConstantValue` patch type for compositions. 
\n\nI have:\n\n- [X] Read and followed Crossplane's [contribution process].\n- [X] Run `make reviewable test` to ensure this PR is ready for review.\n\n### How has this code been tested\n\nI have created a Composition test at https://github.com/stevendborrelli/composition-examples/tree/main/FromConstantValuePatch and tested it against a 1.2.1 Crossplane build.\n\nThis results in a DB being created with the properties:\n```yaml\napiVersion: database.gcp.crossplane.io/v1beta1\nkind: CloudSQLInstance\nmetadata:\n annotations:\n crossplane.io/composition-resource-name: cloudsqlinstance\n crossplane.io/external-name: cv-test-db-tbzwp-x2dqn\n test.crossplane.io/constant-value: constant annotation\n creationTimestamp: \"2021-05-07T18:19:04Z\"\n finalizers:\n - finalizer.managedresource.crossplane.io\n generateName: cv-test-db-tbzwp-\n generation: 4\n labels:\n crossplane.io/claim-name: cv-test-db\n crossplane.io/claim-namespace: default\n crossplane.io/composite: cv-test-db-tbzwp\n name: cv-test-db-tbzwp-x2dqn\n ownerReferences:\n - apiVersion: database.example.org/v1alpha1\n controller: true\n kind: CompositePostgreSQLInstance\n name: cv-test-db-tbzwp\n uid: acd55391-8df9-4f69-835a-e12f8b17371c\n resourceVersion: \"266474\"\n uid: 096bb318-caac-4c80-9a0f-e0c6c0f1ee18\nspec:\n forProvider:\n databaseVersion: POSTGRES_9_6\n g", + "codeSnippets": [ + "apiVersion: example.org/v1alpha1 \r\nkind: Test\r\nmetadata:\r\n name: test-one\r\n annotations:\r\n woo: lalala\r\n labels:\r\n label: test\r\nspec:\r\n parameters:\r\n fieldOne: one\r\n fieldTwo: two", + "apiVersion: example.org/v1alpha1 \r\nkind: Test\r\nmetadata:\r\n name: test-one\r\n annotations:\r\n woo: lalala\r\n labels:\r\n label: test\r\nspec:\r\n parameters:\r\n fieldOne: one\r\n fieldTwo: two", + "# Create XRD with some default source values\r\napiVersion: apiextensions.crossplane.io/v1\r\nkind: CompositeResourceDefinition\r\nmetadata:\r\n name: compositetests.example.org\r\nspec:\r\n group: example.org\r\n names:\r\n kind: CompositeTest\r\n plural: compositetests\r\n claimNames:\r\n kind: Test\r\n plural: tests\r\n defaultCompositionRef:\r\n name: test\r\n versions:\r\n - name: v1alpha1\r\n referenceable: true\r\n served: true\r\n schema:\r\n openAPIV3Schema:\r\n type: object\r\n properties:\r\n spec:\r\n type: object\r\n properties:\r\n parameters:\r\n type: object\r\n properties:\r\n fieldOne:\r\n type: string\r\n fieldTwo:\r\n type: string\r\n required:\r\n - parameters", + "apiVersion: apiextensions.crossplane.io/v1\r\nkind: Composition\r\nmetadata:\r\n name: test \r\nspec:\r\n compositeTypeRef:\r\n apiVersion: example.org/v1alpha1\r\n kind: CompositeTest\r\n patchSets:\r\n - name: Metadata\r\n patches:\r\n - fromFieldPath: metadata.labels\r\n - fromFieldPath: metadata.labels[\"crossplane.io/claim-namespace\"]\r\n toFieldPath: metadata.namespace\r\n\r\n - name: Parameters\r\n patches:\r\n - type: FromMultipleCompositeFieldPaths\r\n # Retrieve two values from XRD parameters\r\n fromMultipleFieldPaths: \r\n - spec.parameters.fieldOne\r\n - spec.parameters.fieldTwo\r\n toFieldPath: metadata.annotations[\"description\"]\r\n transforms:\r\n # Map each field value to another value\r\n # [\"one\",\"two\"] -> [\"1\",\"2\"]\r\n - type: map\r\n map:\r\n one: \"1\"\r\n two: \"2\"\r\n\r\n # Format all values into a single output\r\n # [\"1\",\"2\"] -> \"Field One: 31 - Field Two: 32\"\r\n - type: combine\r\n combine:\r\n type: string\r\n string:\r\n fmt: \"Field One: %x - Field Two: %x\"\r\n\r\n resources:\r\n - base:\r\n apiVersion: v1\r\n 
kind: Secret # Lazy testing, should not use native resources\r\n metadata:\r\n namespace:\r\n type: kubernetes.io/opaque\r\n data: {}\r\n patches:\r\n - type: PatchSet\r\n patchSetName: Metadata\r\n - type: PatchSet\r\n patchSetName: Parameters", + "apiVersion: v1\r\n kind: Secret\r\n type: kubernetes.io/opaque\r\n metadata:\r\n annotations:\r\n description: 'Field One: 31 - Field Two: 32'\r\n generateName: test-one-v55gp-\r\n labels:\r\n crossplane.io/claim-name: test-one\r\n crossplane.io/claim-namespace: default\r\n crossplane.io/composite: test-one-v55gp\r\n label: test\r\n name: test-one-v55gp-lt7j7\r\n namespace: default\r\n..." + ] + } + }, + "metadata": { + "tags": [ + "crossplane", + "graduated", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "crossplane" + ], + "targetResourceKinds": [ + "Secret", + "Namespace" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/crossplane/crossplane/pull/2093", + "repo": "https://github.com/crossplane/crossplane", + "pr": "https://github.com/crossplane/crossplane/pull/2298" + }, + "reactions": 7, + "comments": 8, + "synthesizedBy": "regex", + "qualityScore": 64 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with crossplane installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:25:24.846Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/crossplane/crossplane-2352-add-combine-from-to-composite-patch-types.json b/solutions/cncf-generated/crossplane/crossplane-2352-add-combine-from-to-composite-patch-types.json new file mode 100644 index 00000000..60113f3b --- /dev/null +++ b/solutions/cncf-generated/crossplane/crossplane-2352-add-combine-from-to-composite-patch-types.json @@ -0,0 +1,80 @@ +{ + "version": "kc-mission-v1", + "name": "crossplane-2352-add-combine-from-to-composite-patch-types", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "crossplane: Add Combine{From,To}Composite patch types", + "description": "### Description of your changes\n\nThis PR implements \"multiple source patches\" as a new pair of patch types, where multiple source values from the same resource can be combined into a single destination field. 
These patches take a new `combine` struct as input, allowing for expansion of the functionality in future.\n\nThese patches currently do not apply if any of the given variables are not found (whether this returns an error depends on configured patching policy).\n\nNote: Added the reverse patch ", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "### Description of your changes\n\nThis PR implements \"multiple source patches\" as a new pair of patch types, where multiple source values from the same resource can be combined into a single destinatio" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\napiVersion: apiextensions.crossplane.io/v1\r\nkind: CompositeResourceDefinition\r\nmetadata:\r\n name: nops.nopresource.crossplane.dev\r\nspec:\r\n defaultCompositionRef:\r\n name: nop-resource\r\n group: nopresource.crossplane.dev\r\n names:\r\n categories:\r\n - crossplane\r\n - wrapper\r\n - nop\r\n kind: Nop\r\n plural: nops\r\n versions:\r\n - additionalPrinterColumns: []\r\n name: v1alpha1\r\n referenceable: true\r\n served: true\r\n schema:\r\n openAPIV3Schema:\r\n \n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/vshn/crossplane-service-broker/pull/43. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "[![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com)\n\nThis PR contains the following updates:\n\n| Package | Type | Update | Change |\n|---|---|---|---|\n| [github.com/crossplane/crossplane](https://togithub.com/crossplane/crossplane) | require | minor | `v1.1.1` -> `v1.3.0` |\n\n---\n\n### [`v1.3.0`](https://togithub.com/crossplane/crossplane/releases/v1.3.0)\n\n[Compare Source](https://togithub.com/crossplane/crossplane/compare/v1.2.3...v1.3.0)\n\n#### Major Themes\n\nThe v1.3.0 release introduces a hotly awaited Composition feature - https://github.com/crossplane/crossplane/pull/2352 adds new `CombineFromComposite` and `CombineToComposite` patch types that can be used to patch from multiple Composite Resource (XR) fields to a single composed resource field, and vice versa.\n\n#### Notable Updates\n\n- https://github.com/crossplane/crossplane/pull/2349 - Marks the v1beta1 pkg.crossplane.io and apiextensions.pkg.crossplane.io API groups as deprecated.\n- https://github.com/crossplane/crossplane/pull/2311 - Removes support for the OAM Crossplane extension. 
We recommend using Crossplane with [KubeVela](https://kubevela.io/) instead.\n- https://github.com/crossplane/crossplane/pull/2352 - Adds new `CombineFromComposite` and `CombineToComposite` composition patch types.\n- https://github.com/crossplane/crossplane/pull/2180 - XRs now continue the reconcile process when they enco", + "codeSnippets": [ + "apiVersion: apiextensions.crossplane.io/v1\r\nkind: CompositeResourceDefinition\r\nmetadata:\r\n name: nops.nopresource.crossplane.dev\r\nspec:\r\n defaultCompositionRef:\r\n name: nop-resource\r\n group: nopresource.crossplane.dev\r\n names:\r\n categories:\r\n - crossplane\r\n - wrapper\r\n - nop\r\n kind: Nop\r\n plural: nops\r\n versions:\r\n - additionalPrinterColumns: []\r\n name: v1alpha1\r\n referenceable: true\r\n served: true\r\n schema:\r\n openAPIV3Schema:", + "apiVersion: apiextensions.crossplane.io/v1\r\nkind: CompositeResourceDefinition\r\nmetadata:\r\n name: nops.nopresource.crossplane.dev\r\nspec:\r\n defaultCompositionRef:\r\n name: nop-resource\r\n group: nopresource.crossplane.dev\r\n names:\r\n categories:\r\n - crossplane\r\n - wrapper\r\n - nop\r\n kind: Nop\r\n plural: nops\r\n versions:\r\n - additionalPrinterColumns: []\r\n name: v1alpha1\r\n referenceable: true\r\n served: true\r\n schema:\r\n openAPIV3Schema:\r\n properties:\r\n spec:\r\n description: A NopSpec defines the configuration of a Nop.\r\n properties:\r\n parameters:\r\n type: object\r\n description: >\r\n NopParameters are the configurable fields of\r\n a Nop.\r\n properties:\r\n nop:\r\n description: A nop is a nop.\r\n type: string\r\n default: nop\r\n enum:\r\n - nop\r\n - pon\r\n required:\r\n - parameters\r\n type: object\r\n status:\r\n description: A NopStatus defines the status of a Nop.\r\n properties:\r\n nop:\r\n description: Either a nop or a pon.\r\n type: string\r\n type: object", + "apiVersion: apiextensions.crossplane.io/v1\r\nkind: Composition\r\nmetadata:\r\n labels:\r\n provider: nop\r\n purpose: provides-nop\r\n name: nop-resource \r\nspec:\r\n compositeTypeRef:\r\n apiVersion: nopresource.crossplane.dev/v1alpha1\r\n kind: Nop\r\n resources:\r\n - name: Nop1\r\n base:\r\n apiVersion: nop.crossplane.io/v1alpha1\r\n kind: NopResource\r\n metadata:\r\n name: nop1\r\n labels:\r\n purpose: provides-nop\r\n spec:\r\n forProvider:\r\n conditionAfter:\r\n - conditionType: \"Ready\"\r\n conditionStatus: \"False\"\r\n time: \"5s\"\r\n - conditionType: \"Ready\"\r\n conditionStatus: \"True\"\r\n time: \"10s\"\r\n - conditionType: \"Synced\"\r\n conditionStatus: \"False\"\r\n time: \"30s\"\r\n - conditionType: \"Synced\"\r\n conditionStatus: \"True\"\r\n time: \"120s\"\r\n\r\n patches:\r\n - type: CombineFromComposite\r\n combine:\r\n variables:\r\n - fromFieldPath: spec.parameters.nop\r\n - fromFieldPath: metadata.uid\r\n strategy: String\r\n string:\r\n fmt: \"fromcomposite-%s-%s\"\r\n toFieldPath: metadata.labels['nop']\r\n\r\n - type: CombineToComposite\r\n combine:\r\n variables:\r\n - fromFieldPath: metadata.name\r\n - fromFieldPath: metadata.labels[\"crossplane.io/composite\"]\r\n strategy: String\r\n string:\r\n fmt: \"tocomposite-%s-%s\"\r\n toFieldPath: status.nop\r\n transforms:\r\n - type: string\r\n string:\r\n fmt: \"%s-transform-value\"\r\n\r\n# If both patches work right, the composed \r\n# resource should have a `nop` label containing\r\n# fromcomposite--\r\n# and the composite resource should have a\r\n# `nop` status containing \r\n# `tocomposite---transform-value", + "apiVersion: nopresource.crossplane.dev/v1alpha1\r\nkind: 
Nop\r\nmetadata:\r\n name: its-a-nop\r\nspec:\r\n parameters:\r\n nop: pon", + "apiVersion: nop.crossplane.io/v1alpha1\r\nkind: NopResource\r\nmetadata:\r\n labels:\r\n crossplane.io/composite: its-a-nop\r\n nop: fromcomposite-pon-9e67f5f3-b2d1-40d9-8ade-c3b22ee0c30f # Patched from composite\r\n purpose: provides-nop\r\n name: its-a-nop-q8jht\r\n uid: 8ac941ae-935f-4fe4-b37e-63627c2a3dea\r\nspec: {}\r\nstatus:\r\n atProvider: {}" + ] + } + }, + "metadata": { + "tags": [ + "crossplane", + "graduated", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "crossplane" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/crossplane/crossplane/pull/2352", + "repo": "https://github.com/crossplane/crossplane", + "pr": "https://github.com/vshn/crossplane-service-broker/pull/43" + }, + "reactions": 5, + "comments": 1, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with crossplane installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:25:39.866Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/crossplane/crossplane-2880-use-serviceaccountname-for-service-account-creation.json b/solutions/cncf-generated/crossplane/crossplane-2880-use-serviceaccountname-for-service-account-creation.json new file mode 100644 index 00000000..229f2345 --- /dev/null +++ b/solutions/cncf-generated/crossplane/crossplane-2880-use-serviceaccountname-for-service-account-creation.json @@ -0,0 +1,88 @@ +{ + "version": "kc-mission-v1", + "name": "crossplane-2880-use-serviceaccountname-for-service-account-creation", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "crossplane: Use serviceAccountName for Service Account creation", + "description": "### Description of your changes\n\nUpdates the name of a Service Account, which is to be created by a controller, to be specified by the `.spec.serviceAccountName` field of `ControllerConfig` if specified.\n\nIt's currently named the same name of a `ProviderRevision` even if the `.spec.serviceAccountName` is specified whereas the `.spec.template.spec.serviceAccountName` in a Deployment is specified by it.\n\nThe current approach wouldn't make a lot of sense because:\n\n- ClusterRolebindings to be create", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "### Description of your changes\n\nUpdates the name of a Service Account, which is to be created by a controller, to be specified by the `.spec.serviceAccountName` field of `ControllerConfig` if specifi" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nCreated a `Provider` and `ProviderConfig` with `InjectedIdentity` and then created a `Topic` managed resource:\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/crossplane-contrib/provider-gcp/pull/414. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "### Description of your changes\n\nAdds support for Workload Identity, with which credentials no longer need to be present in Secrets.\n\nIf `InjectedIdentity` is specified, a token source for application default credentials by a GCP Service Account, which is specified in the `iam.gke.io/gcp-service-account` annotation of a provider's Kubernetes Service Account, is used for authentication.\n\nI have:\n\n### How has this code been tested\n\n[contribution process]: https://git.io/fj2m9\n\nTested this in the following environment and process.\n\n```console\n$ kubectl version --short\nClient Version: v1.20.7\nServer Version: v1.20.12-gke.1500\n```\n\n```console\n$ gcloud container clusters describe $CLUSTER --format=\"value(workloadIdentityConfig.workloadPool)\"\n$PROJECT_ID.svc.id.goog\n```\n\n```console\n$ kubectl get deploy crossplane \\\n -o jsonpath=\"{.spec.template.spec.containers[*].image}\" \\\n -n crossplane-system\ncrossplane/crossplane:v1.6.2\n```\n\nCreated a `Provider` and `ProviderConfig` with `InjectedIdentity` and then created a `Topic` managed resource:\n\n```console\n$ cat <=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with crossplane installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:25:26.876Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/crossplane/crossplane-2938-feat-composition-add-patch-fromobjectfieldpath.json b/solutions/cncf-generated/crossplane/crossplane-2938-feat-composition-add-patch-fromobjectfieldpath.json new file mode 100644 index 00000000..4f1c455c --- /dev/null +++ b/solutions/cncf-generated/crossplane/crossplane-2938-feat-composition-add-patch-fromobjectfieldpath.json @@ -0,0 +1,82 @@ +{ + "version": "kc-mission-v1", + "name": "crossplane-2938-feat-composition-add-patch-fromobjectfieldpath", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "crossplane: feat(composition): Add patch fromObjectFieldPath", + "description": "### Description of your changes\n\nAdds the ability to patch from any K8s object using `type: FromObjectFieldPath`.\n\nThe changes are purely additive and do not break the existing API.\n\nInternally, the controller tries to find the object using `fromObjecRef` and passes the found object to the already existing `applyFromFieldPath`.\n\nA patch would look like this: \n\n```yaml\n patches:\n - type: FromObjectFieldPath\n fromObjectRef:\n apiVersion: v1\n kind: ConfigMa", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "### Description of your changes\n\nAdds the ability to patch from any K8s object using `type: FromObjectFieldPath`.\n\nThe changes are purely additive and do not break the existing API.\n\nInternally, the c" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\npatches:\r\n - type: FromObjectFieldPath\r\n fromObjectRef:\r\n apiVersion: v1\r\n kind: ConfigMap\r\n name: sample-config\r\n namespace: sample-ns\r\n fromFieldPath: data.value\r\n toFieldPath: spec.forProvider.sampleField\r\n policy:\r\n fromFieldPath: Required # Dont render if referenced resource does not exist\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/crossplane/crossplane/pull/3007. 
Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "### Description of your changes\n\nDesign: https://github.com/crossplane/crossplane/pull/3008\n\nFixes: #2099 \n\nThis adds a new cluster scope CRD `EnvironmentConfig` that is similar to a standard `ConfigMap` but allows storing arbitrary values instead of just strings.\n\n`EnvironmentConfig`s can be used in compositions using the new `FromEnvironmentFieldPath` patch type.\n\nThe main use case of this PR is to use to build a generic Crossplane package that can run in multiple environment without having to hardcode environment specific values in the compositions.\n\nThis is a replacement for #2938. Based on an idea briefly mentioned by @negz in the community meeting.\n\nIt avoids the discussed security and RBAC issues by not requiring access to secrets and config maps and relying on Crossplane native resources instead.\n\nI have:\n\n### How has this code been tested\n\nManually and unit tests\n\n[contribution process]: https://git.io/fj2m9", + "codeSnippets": [ + "patches:\r\n - type: FromObjectFieldPath\r\n fromObjectRef:\r\n apiVersion: v1\r\n kind: ConfigMap\r\n name: sample-config\r\n namespace: sample-ns\r\n fromFieldPath: data.value\r\n toFieldPath: spec.forProvider.sampleField\r\n policy:\r\n fromFieldPath: Required # Dont render if referenced resource does not exist", + "patches:\r\n - type: FromObjectFieldPath\r\n fromObjectRef:\r\n apiVersion: v1\r\n kind: ConfigMap\r\n name: sample-config\r\n namespace: sample-ns\r\n fromFieldPath: data.value\r\n toFieldPath: spec.forProvider.sampleField\r\n policy:\r\n fromFieldPath: Required # Dont render if referenced resource does not exist", + "where `ref` is the referenced object and `mr` is the managed resource.\r\n\r\nPatching to XR would be done by simply swapping `mr` with `xr`.\r\n\r\nDoes this makes sense?\nWhile doing some testing I noticed that sometimes `fromObjectRef` needs to be dynamic. For example you may want your namespace to match `metadata.labels[crossplane.io/claim-namespace]`.\r\n\r\nI added two additional fields `nameFromFieldPath` and `namespaceFromFieldPath` to extract a value from an arbitrary composite field.\r\n\r\nThe resources I used for testing (using `provider-kuberntes` as sample):\r\n\r\n`composition.yaml`", + "`claim.yaml`" + ] + } + }, + "metadata": { + "tags": [ + "crossplane", + "graduated", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "crossplane" + ], + "targetResourceKinds": [ + "Configmap", + "Namespace" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/crossplane/crossplane/pull/2938", + "repo": "https://github.com/crossplane/crossplane", + "pr": "https://github.com/crossplane/crossplane/pull/3007" + }, + "reactions": 4, + "comments": 6, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with crossplane installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:25:45.284Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/crossplane/crossplane-3007-feat-compositions-support-patching-from-environment.json b/solutions/cncf-generated/crossplane/crossplane-3007-feat-compositions-support-patching-from-environment.json new file mode 100644 index 00000000..2258a2f9 --- /dev/null +++ b/solutions/cncf-generated/crossplane/crossplane-3007-feat-compositions-support-patching-from-environment.json @@ -0,0 +1,83 @@ +{ + "version": "kc-mission-v1", + "name": "crossplane-3007-feat-compositions-support-patching-from-environment", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "crossplane: feat(compositions): Support patching from Environment", + "description": "### Description of your changes\n\nDesign: https://github.com/crossplane/crossplane/pull/3008\n\nFixes: #2099 \n\nThis adds a new cluster scope CRD `EnvironmentConfig` that is similar to a standard `ConfigMap` but allows storing arbitrary values instead of just strings.\n\n`EnvironmentConfig`s can be used in compositions using the new `FromEnvironmentFieldPath` patch type.\n\nThe main use case of this PR is to use to build a generic Crossplane package that can run in multiple environment without having to", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "### Description of your changes\n\nDesign: https://github.com/crossplane/crossplane/pull/3008\n\nFixes: #2099 \n\nThis adds a new cluster scope CRD `EnvironmentConfig` that is similar to a standard `ConfigM" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nresources:\r\n - base:\r\n apiVersion: rancher2.rancher.jet.crossplane.io/v1alpha1\r\n kind: Project\r\n spec:\r\n forProvider:\r\n clusterId: ?\r\n patches:\r\n - toFieldPath: spec.forProvider.clusterId\r\n from: \n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/crossplane/crossplane/pull/3008. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "### Description of your changes\n\nThis design proposal aims to provide a solution for adding a third data source for composition patches next to claim/composite and composed resource by introducing a new resource `EnvironmentConfig`.\n\nIt is _not_ the goal of this design document to provide a way for patching from any Kubernetes object (aka generic referencers).\n\nCC @negz \n\nRelated to https://github.com/crossplane/crossplane/issues/2099\n\nImplementation: https://github.com/crossplane/crossplane/pull/3007\n\nI have:\n\n### How has this code been tested\n\nn.a.", + "codeSnippets": [ + "resources:\r\n - base:\r\n apiVersion: rancher2.rancher.jet.crossplane.io/v1alpha1\r\n kind: Project\r\n spec:\r\n forProvider:\r\n clusterId: ?\r\n patches:\r\n - toFieldPath: spec.forProvider.clusterId\r\n from: ", + "resources:\r\n - base:\r\n apiVersion: rancher2.rancher.jet.crossplane.io/v1alpha1\r\n kind: Project\r\n spec:\r\n forProvider:\r\n clusterId: ?\r\n patches:\r\n - toFieldPath: spec.forProvider.clusterId\r\n from: ", + "apiVersion: argoproj.io/v1alpha1\r\nkind: ApplicationSet\r\nspec:\r\n generators:\r\n - git:\r\n repoURL: https://github.com/argoproj/applicationset.git\r\n revision: HEAD\r\n directories:\r\n - path: examples/git-generator-directory/cluster-addons/*\r\n template:\r\n spec:\r\n destination:\r\n namespace: '{{path.basename}}'", + "apiVersion: argoproj.io/v1alpha1\r\nkind: Application\r\nspec:\r\n destination:\r\n namespace: 'argo-workflows'", + "resources:\r\n - base:\r\n apiVersion: rancher2.rancher.jet.crossplane.io/v1alpha1\r\n kind: Namespace\r\n metadata:\r\n name: \r\n spec:\r\n forProvider:\r\n projectId: " + ] + } + }, + "metadata": { + "tags": [ + "crossplane", + "graduated", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "crossplane" + ], + "targetResourceKinds": [ + "Configmap", + "Secret" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/crossplane/crossplane/pull/3007", + "repo": "https://github.com/crossplane/crossplane", + "pr": "https://github.com/crossplane/crossplane/pull/3008" + }, + "reactions": 14, + "comments": 16, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with crossplane installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:25:16.975Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/crossplane/crossplane-3939-proposal-break-up-large-providers-by-service.json b/solutions/cncf-generated/crossplane/crossplane-3939-proposal-break-up-large-providers-by-service.json new file mode 100644 index 00000000..0d593b33 --- /dev/null +++ b/solutions/cncf-generated/crossplane/crossplane-3939-proposal-break-up-large-providers-by-service.json @@ -0,0 +1,82 @@ +{ + "version": "kc-mission-v1", + "name": "crossplane-3939-proposal-break-up-large-providers-by-service", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "crossplane: Proposal: Break Up Large Providers by Service", + "description": "### Description of your changes\n\nThis design document proposes that the 6-7 largest Crossplane providers be broken down into smaller, service-scoped ones. 
This would help folks install fewer CRDs, thus improving the ratio of installed-to-used Crossplane CRDs. Installing fewer CRDs is necessary to work around performance issues in the Kubernetes API server and Kubernetes clients.\n\nI have:\n\n### How has this code been tested\n\n[contribution process]: https://git.io/fj2m9\n\nI proof-read it. 😄", + "type": "analyze", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "### Description of your changes\n\nThis design document proposes that the 6-7 largest Crossplane providers be broken down into smaller, service-scoped ones. This would help folks install fewer CRDs, thu" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\napiVersion: pkg.crossplane.io/v1\r\nkind: Provider\r\nmetadata:\r\n name: crossplane-aws\r\nspec:\r\n controllerConfigRef:\r\n name: crossplane-aws\r\n ignoreCrossplaneConstraints: false\r\n package: #see pr in crossplane-contrib/aws https://github.com/crossplane-contrib/provider-aws/pull/1727\r\n packagePullPolicy: IfNotPresent\r\n revisionActivationPolicy: Automatic\r\n revisionHistoryLimit: 1\r\n skipDependencyResolution: false\r\n excludeCrds:\r\n - \\.aws\\.crossplane\\.io\r\n includeCrds:\r\n - securitygr\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/crossplane/crossplane/pull/3987. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "### Description of your changes\n\nAs a proof of concept for filtering crds when installing providers we updated Provider spec to include two new array fields `includeCrds` and `excludeCrds` that can contain regex values:\n\n```yaml\napiVersion: pkg.crossplane.io/v1\nkind: Provider\nmetadata:\n name: crossplane-aws\nspec:\n controllerConfigRef:\n name: crossplane-aws\n ignoreCrossplaneConstraints: false\n package: #see pr in crossplane-contrib/aws https://github.com/crossplane-contrib/provider-aws/pull/1727\n packagePullPolicy: IfNotPresent\n revisionActivationPolicy: Automatic\n revisionHistoryLimit: 1\n skipDependencyResolution: false\n excludeCrds:\n - \\.aws\\.crossplane\\.io\n includeCrds:\n - securitygroup\\.ec2\\.aws\\.crossplane\\.io\n - securitygrouprules\\.ec2\\.aws\\.crossplane\\.io\n - \\.eks\\.aws\\.crossplane\\.io\n\n```\nThis poc avoids the proposal of breaking large providers by service/group/type and shows the possibility to filter applied crds.\n\nWhen applying a provider with these optional fields set, the list of applied crds gets filtered. If a crd matches a value in excludeCrds, it is ignored, unless it matches a value in includeCrds. Crds that are not matched by values in excludeCrds get applied.
If excludeCrds is empty, all crds will be applied.\n\nThe deployment for the provider is created with an additional env variable that is used by the provider to filter the activated controllers.\n\nThis is described and can be seen in the PR in crosplane-contrib/provider-aws: https:", + "codeSnippets": [ + "apiVersion: pkg.crossplane.io/v1\r\nkind: Provider\r\nmetadata:\r\n name: crossplane-aws\r\nspec:\r\n controllerConfigRef:\r\n name: crossplane-aws\r\n ignoreCrossplaneConstraints: false\r\n package: #see pr in crossplane-contrib/aws https://github.com/crossplane-contrib/provider-aws/pull/1727\r\n packagePullPolicy: IfNotPresent\r\n revisionActivationPolicy: Automatic\r\n revisionHistoryLimit: 1\r\n skipDependencyResolution: false\r\n excludeCrds:\r\n - \\.aws\\.crossplane\\.io\r\n includeCrds:\r\n - securitygr", + "apiVersion: pkg.crossplane.io/v1\r\nkind: Provider\r\nmetadata:\r\n name: crossplane-aws\r\nspec:\r\n controllerConfigRef:\r\n name: crossplane-aws\r\n ignoreCrossplaneConstraints: false\r\n package: #see pr in crossplane-contrib/aws https://github.com/crossplane-contrib/provider-aws/pull/1727\r\n packagePullPolicy: IfNotPresent\r\n revisionActivationPolicy: Automatic\r\n revisionHistoryLimit: 1\r\n skipDependencyResolution: false\r\n excludeCrds:\r\n - \\.aws\\.crossplane\\.io\r\n includeCrds:\r\n - securitygroup\\.ec2\\.aws\\.crossplane\\.io\r\n - securitygrouprules\\.ec2\\.aws\\.crossplane\\.io\r\n - \\.eks\\.aws\\.crossplane\\.io" + ] + } + }, + "metadata": { + "tags": [ + "crossplane", + "graduated", + "app-definition", + "analyze", + "performance", + "proposal", + "crd-count" + ], + "cncfProjects": [ + "crossplane" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "issueTypes": [ + "analyze" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/crossplane/crossplane/pull/3939", + "repo": "https://github.com/crossplane/crossplane", + "pr": "https://github.com/crossplane/crossplane/pull/3987" + }, + "reactions": 10, + "comments": 19, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with crossplane installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:25:23.060Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/crossplane/crossplane-4444-add-usage-type-for-deletion-ordering-and-resource-protection.json b/solutions/cncf-generated/crossplane/crossplane-4444-add-usage-type-for-deletion-ordering-and-resource-protection.json new file mode 100644 index 00000000..c3220741 --- /dev/null +++ b/solutions/cncf-generated/crossplane/crossplane-4444-add-usage-type-for-deletion-ordering-and-resource-protection.json @@ -0,0 +1,82 @@ +{ + "version": "kc-mission-v1", + "name": "crossplane-4444-add-usage-type-for-deletion-ordering-and-resource-protection", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "crossplane: Add `Usage` type for Deletion Ordering and Resource Protection", + "description": "### Description of your changes\n\nThis PR implements the `Usage` type proposed in [this one-pager](https://github.com/crossplane/crossplane/blob/master/design/one-pager-generic-usage-type.md) as an alpha feature.\n\nThis would enable ordered deletions when there is a usage dependency between composed resources. 
A typical example is having a helm `Release` together with a GKE `Cluster` resource where the release is installed into the cluster. Currently, we end up orphaned `Release` resources when th", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "### Description of your changes\n\nThis PR implements the `Usage` type proposed in [this one-pager](https://github.com/crossplane/crossplane/blob/master/design/one-pager-generic-usage-type.md) as an alp" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nresources:\r\n - name: cluster\r\n base:\r\n apiVersion: container.gcp.upbound.io/v1beta1\r\n kind: Cluster\r\n ...\r\n - name: release\r\n base:\r\n apiVersion: helm.crossplane.io/v1beta1\r\n kind: Release\r\n ...\r\n - name: release-uses-cluster\r\n base:\r\n apiVersion: apiextensions.crossplane.io/v1alpha1\r\n kind: Usage\r\n spec:\r\n of:\r\n apiVersion: container.gcp.upbound.io/v1beta1\r\n kind: Cluster\r\n resourceSel\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/crossplane/crossplane-runtime/pull/518. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "### Description of your changes\n\nThis PR adds additional methods to the unstructured composed package.\nThese methods will be consumed by the Usage implementation as suggested [here](https://github.com/crossplane/crossplane/pull/4444#discussion_r1290648915).\n\nI have:\n\n### How has this code been tested\n\nConsume this PR on top of https://github.com/crossplane/crossplane/pull/4444 and run:\n\n```\nmake e2e E2E_TEST_FLAGS=\"-test.v --test-suite usage\"\n```\n\n[contribution process]: https://git.io/fj2m9", + "codeSnippets": [ + "resources:\r\n - name: cluster\r\n base:\r\n apiVersion: container.gcp.upbound.io/v1beta1\r\n kind: Cluster\r\n ...\r\n - name: release\r\n base:\r\n apiVersion: helm.crossplane.io/v1beta1\r\n kind: Release\r\n ...\r\n - name: release-uses-cluster\r\n base:\r\n apiVersion: apiextensions.crossplane.io/v1alpha1\r\n kind: Usage\r\n spec:\r\n of:\r\n apiVersion: container.gcp.upbound.io/v1beta1\r\n kind: Cluster\r\n resourceSel", + "resources:\r\n - name: cluster\r\n base:\r\n apiVersion: container.gcp.upbound.io/v1beta1\r\n kind: Cluster\r\n ...\r\n - name: release\r\n base:\r\n apiVersion: helm.crossplane.io/v1beta1\r\n kind: Release\r\n ...\r\n - name: release-uses-cluster\r\n base:\r\n apiVersion: apiextensions.crossplane.io/v1alpha1\r\n kind: Usage\r\n spec:\r\n of:\r\n apiVersion: container.gcp.upbound.io/v1beta1\r\n kind: Cluster\r\n resourceSelector:\r\n matchControllerRef: true\r\n by:\r\n apiVersion: helm.crossplane.io/v1beta1\r\n kind: Release\r\n resourceSelector:\r\n matchControllerRef: true", + "helm repo add crossplane-master https://charts.crossplane.io/master --force-update\r\nhelm upgrade --install crossplane --namespace crossplane-system crossplane-master/crossplane --version v1.14.0-rc.0.190.g78bdab10 --create-namespace --set image.repository=turkenh/crossplane --set image.tag=v1.14.0-rc.0.198.g9049ab9c --set \"args={--debug,--enable-usages}\"", + "❯ cd test/e2e/manifests/apiextensions/usage/standalone/\r\n❯ kubectl apply -f setup/\r\nprovider.pkg.crossplane.io/provider-nop created\r\n❯ kubectl wait provider.pkg 
provider-nop --for condition=healthy --timeout 2m\r\nprovider.pkg.crossplane.io/provider-nop condition met\r\n❯ kubectl apply -f with-by/\r\nusage.apiextensions.crossplane.io/using-uses-used created\r\nnopresource.nop.crossplane.io/used-resource created\r\nnopresource.nop.crossplane.io/using-resource created\r\n❯ kubectl apply -f with-reason/\r\nusage.apiextensions.crossplane.io/protect-a-resource created\r\nnopresource.nop.crossplane.io/protected-resource created\r\n❯ kubectl get usages\r\nNAME DETAILS READY AGE\r\nprotect-a-resource This resource is protected! True 15s\r\nusing-uses-used NopResource/using-resource uses NopResource/used-resource True 19s\r\n❯ kubectl delete -f with-by/used.yaml\r\nError from server (This resource is in-use by 1 Usage(s), including the Usage \"using-uses-used\" by resource NopResource/using-resource.): error when deleting \"with-by/used.yaml\": admission webhook \"nousages.apiextensions.crossplane.io\" denied the request: This resource is in-use by 1 Usage(s), including the Usage \"using-uses-used\" by resource NopResource/using-resource.\r\n❯ kubectl delete -f with-reason/used.yaml\r\nError from server (This resource is in-use by 1 Usage(s), including the Usage \"protect-a-resource\" with reason: \"This resource is protected!\".): error when deleting \"with-reason/used.yaml\": admission webhook \"nousages.apiextensions.crossplane.io\" denied the request: This resource is in-use by 1 Usage(s), including the Usage \"protect-a-resource\" with reason: \"This resource is protected!\".", + "make e2e E2E_TEST_FLAGS=\"-test.v --test-suite usage\"" + ] + } + }, + "metadata": { + "tags": [ + "crossplane", + "graduated", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "crossplane" + ], + "targetResourceKinds": [ + "Namespace" + ], + "difficulty": "advanced", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/crossplane/crossplane/pull/4444", + "repo": "https://github.com/crossplane/crossplane", + "pr": "https://github.com/crossplane/crossplane-runtime/pull/518" + }, + "reactions": 6, + "comments": 11, + "synthesizedBy": "regex", + "qualityScore": 67 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with crossplane installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:25:35.081Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/crossplane/crossplane-5540-feat-configurable-ports.json b/solutions/cncf-generated/crossplane/crossplane-5540-feat-configurable-ports.json new file mode 100644 index 00000000..13dfbe4d --- /dev/null +++ b/solutions/cncf-generated/crossplane/crossplane-5540-feat-configurable-ports.json @@ -0,0 +1,84 @@ +{ + "version": "kc-mission-v1", + "name": "crossplane-5540-feat-configurable-ports", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "crossplane: feat/configurable ports", + "description": "### Description of your changes\n\nWhen running crossplane (and providers/functions) in the host network the need to configure ports arises.\n\nThis MR has two parts:\n\n1. Make crossplane ports (service/webhook, health probe, metrics) configurable\n2. 
Allow `DeploymentRuntimeConfig` to override the default ports (`metrics`, `webhook`, `grpc`) in both `Deployment` and `Service`.\n\nAdditional merge requests would be necessary to\n\n- Adjust the documentation\n- Adjust the provider and function templates (to", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "### Description of your changes\n\nWhen running crossplane (and providers/functions) in the host network the need to configure ports arises.\n\nThis MR has two parts:\n\n1. Make crossplane ports (service/we" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n# K8s provider\r\napiVersion: pkg.crossplane.io/v1\r\nkind: Provider\r\nmetadata:\r\n name: provider-kubernetes\r\nspec:\r\n package: \"index.docker.io/crossplanecontrib/provider-kubernetes:v0.15.0\"\r\n runtimeConfigRef:\r\n name: provider-kubernetes\r\n---\r\napiVersion: pkg.crossplane.io/v1beta1\r\nkind: DeploymentRuntimeConfig\r\nmetadata:\r\n name: provider-kubernetes\r\nspec:\r\n deploymentTemplate:\r\n spec:\r\n selector: { }\r\n template:\r\n spec:\r\n hostNetwork: true\r\n nodeSelecto\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/crossplane-contrib/provider-kubernetes/pull/301. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "### Description of your changes\n\nAs outlined in the issue below, when running Crossplane in certain environments (like EKS using Calico CNI), where the control plane resides outside the workload network it is necessary to run webhooks in the host network. This calls for the need to make the port configurable to avoid conflicts.\n\nI opened [crossplane/crossplane!5540](https://github.com/crossplane/crossplane/pull/5540) to allow a `DeploymentRuntimeConfig` to overwrite the ports of the crossplane managed services created for each provider / function.\n\nI have:\n\n### How has this code been tested\n\nI deployed my fork of this and crossplane in our test cluster running EKS and Calico. 
With this I was able to run both Crossplane and provider-kubernetes inside the host network with custom ports.\n\nThis change only makes sense if my other PR gets accepted.\n\n[contribution process]: https://git.io/fj2m9", + "codeSnippets": [ + "# K8s provider\r\napiVersion: pkg.crossplane.io/v1\r\nkind: Provider\r\nmetadata:\r\n name: provider-kubernetes\r\nspec:\r\n package: \"index.docker.io/crossplanecontrib/provider-kubernetes:v0.15.0\"\r\n runtimeConfigRef:\r\n name: provider-kubernetes\r\n---\r\napiVersion: pkg.crossplane.io/v1beta1\r\nkind: DeploymentRuntimeConfig\r\nmetadata:\r\n name: provider-kubernetes\r\nspec:\r\n deploymentTemplate:\r\n spec:\r\n selector: { }\r\n template:\r\n spec:\r\n hostNetwork: true\r\n nodeSelecto", + "# K8s provider\r\napiVersion: pkg.crossplane.io/v1\r\nkind: Provider\r\nmetadata:\r\n name: provider-kubernetes\r\nspec:\r\n package: \"index.docker.io/crossplanecontrib/provider-kubernetes:v0.15.0\"\r\n runtimeConfigRef:\r\n name: provider-kubernetes\r\n---\r\napiVersion: pkg.crossplane.io/v1beta1\r\nkind: DeploymentRuntimeConfig\r\nmetadata:\r\n name: provider-kubernetes\r\nspec:\r\n deploymentTemplate:\r\n spec:\r\n selector: { }\r\n template:\r\n spec:\r\n hostNetwork: true\r\n nodeSelector:\r\n kubernetes.io/os: linux\r\n imagePullSecrets:\r\n - name: crossplane-workaround\r\n containers:\r\n - name: package-runtime\r\n args:\r\n - --webhook-port=9610\r\n serviceTemplate:\r\n spec:\r\n ports:\r\n - name: webhook\r\n port: 9200 # doesn't matter\r\n targetPort: 9610\r\n protocol: TCP", + "apiVersion: pkg.crossplane.io/v1beta1\r\nkind: Function\r\nmetadata:\r\n name: function-patch-and-transform\r\nspec:\r\n package: xpkg.upbound.io/crossplane-contrib/function-patch-and-transform:v0.7.0\r\n runtimeConfigRef:\r\n name: function-patch-and-transform\r\n---\r\napiVersion: pkg.crossplane.io/v1beta1\r\nkind: DeploymentRuntimeConfig\r\nmetadata:\r\n name: function-patch-and-transform\r\nspec:\r\n deploymentTemplate:\r\n spec:\r\n selector: { }\r\n template:\r\n spec:\r\n hostNetwork: true\r\n containers:\r\n - name: package-runtime\r\n args:\r\n - --address=:9612\r\n serviceTemplate:\r\n spec:\r\n ports:\r\n - name: webhook\r\n port: 9200 # doesn't matter\r\n targetPort: 9612\r\n protocol: TCP", + "hostNetwork: true\r\ndnsPolicy: \"ClusterFirstWithHostNet\"\r\nwebhooks:\r\n port: 9600\r\nmetrics:\r\n enabled: true\r\n port: 9601\r\nreadiness:\r\n port: 9602", + "apiVersion: pkg.crossplane.io/v1beta1\r\nkind: DeploymentRuntimeConfig\r\nmetadata:\r\n name: function-patch-and-transform\r\nspec:\r\n deploymentTemplate:\r\n spec:\r\n selector: { }\r\n template:\r\n spec:\r\n hostNetwork: true\r\n containers:\r\n - name: package-runtime\r\n args:\r\n - --address=:9612\r\n ports:\r\n - containerPort: 9612\r\n hostPort: 9612\r\n name: grpc\r\n protocol: TCP\r\n - containerPort: 9613\r\n hostPort: 9613\r\n name: metrics\r\n protocol: TCP\r\n serviceTemplate:\r\n spec:\r\n ports:\r\n - name: webhook\r\n port: 9200 # doesn't matter\r\n targetPort: 9612\r\n protocol: TCP" + ] + } + }, + "metadata": { + "tags": [ + "crossplane", + "graduated", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "crossplane" + ], + "targetResourceKinds": [ + "Pod", + "Deployment", + "Service" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/crossplane/crossplane/pull/5540", + "repo": "https://github.com/crossplane/crossplane", + "pr": 
"https://github.com/crossplane-contrib/provider-kubernetes/pull/301" + }, + "reactions": 11, + "comments": 28, + "synthesizedBy": "regex", + "qualityScore": 64 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with crossplane installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:25:21.003Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/crossplane/crossplane-5543-support-passing-credentials-to-composition-functions.json b/solutions/cncf-generated/crossplane/crossplane-5543-support-passing-credentials-to-composition-functions.json new file mode 100644 index 00000000..0de785b1 --- /dev/null +++ b/solutions/cncf-generated/crossplane/crossplane-5543-support-passing-credentials-to-composition-functions.json @@ -0,0 +1,85 @@ +{ + "version": "kc-mission-v1", + "name": "crossplane-5543-support-passing-credentials-to-composition-functions", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "crossplane: Support passing credentials to composition functions", + "description": "### Description of your changes\n\nFixes https://github.com/crossplane/crossplane/issues/3718\n\nSome functions need credentials to talk to an external system, like AWS or a git repository. Today you can only pass credentials to a function by using a `DeploymentRuntimeConfig` to inject them as files or environment variables. This isn't ideal, as you probably want per-caller credentials, not credentials that are available to all callers.\n\nThis PR allows you to tell Crossplane what credentials to give", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "### Description of your changes\n\nFixes https://github.com/crossplane/crossplane/issues/3718\n\nSome functions need credentials to talk to an external system, like AWS or a git repository. Today you can " + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\napiVersion: apiextensions.crossplane.io/v1\r\nkind: Composition\r\nmetadata:\r\n name: parent\r\nspec:\r\n compositeTypeRef:\r\n apiVersion: nop.example.org/v1alpha1\r\n kind: XNopResource\r\n mode: Pipeline\r\n pipeline:\r\n - step: example\r\n functionRef:\r\n name: function-needs-a-secret\r\n credentials:\r\n - name: credentials-for-something\r\n source: Secret\r\n secretRef:\r\n namespace: crossplane-system\r\n name: super-secret\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/crossplane/crossplane/pull/5808. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "### Description of your changes\n\nWe frequently give this guidance to function authors, but missed actually codifying it.\n\nI went with SHOULD NOT rather than MUST NOT mutate external systems, mostly because I'm thinking of edge cases where a read operation might bump some innocuous counter or similar. I could be convinced to switch to MUST NOT.\n\nI have: \n\nNeed help with this checklist? 
See the [cheat sheet].\n\n[contribution process]: https://github.com/crossplane/crossplane/tree/master/contributing\n[docs tracking issue]: https://github.com/crossplane/docs/issues/new\n[document this change]: https://docs.crossplane.io/contribute/contribute\n[cheat sheet]: https://github.com/crossplane/crossplane/tree/master/contributing#checklist-cheat-sheet", + "codeSnippets": [ + "apiVersion: apiextensions.crossplane.io/v1\r\nkind: Composition\r\nmetadata:\r\n name: parent\r\nspec:\r\n compositeTypeRef:\r\n apiVersion: nop.example.org/v1alpha1\r\n kind: XNopResource\r\n mode: Pipeline\r\n pipeline:\r\n - step: example\r\n functionRef:\r\n name: function-needs-a-secret\r\n credentials:\r\n - name: credentials-for-something\r\n source: Secret\r\n secretRef:\r\n namespace: crossplane-system\r\n name: super-secret", + "apiVersion: apiextensions.crossplane.io/v1\r\nkind: Composition\r\nmetadata:\r\n name: parent\r\nspec:\r\n compositeTypeRef:\r\n apiVersion: nop.example.org/v1alpha1\r\n kind: XNopResource\r\n mode: Pipeline\r\n pipeline:\r\n - step: example\r\n functionRef:\r\n name: function-needs-a-secret\r\n credentials:\r\n - name: credentials-for-something\r\n source: Secret\r\n secretRef:\r\n namespace: crossplane-system\r\n name: super-secret", + "In Python (I think) it looks like this:", + "This is just the \"bare\" protobuf-generated code. We could add wrappers to the function SDKs if we think they'd add a better UX.\r\n\r\nI have: \r\n\r\n- [x] Read and followed Crossplane's [contribution process].\r\n- [x] Run `make reviewable` to ensure this PR is ready for review.\r\n- [x] Added or updated unit tests.\r\n- [x] Added or updated e2e tests.\r\n- [x] Linked a PR or a [docs tracking issue] to [document this change].\r\n- [ ] ~Added `backport release-x.y` labels to auto-backport this PR.~\r\n\r\nNeed help with this checklist? See the [cheat sheet].\r\n\r\n[contribution process]: https://github.com/crossplane/crossplane/tree/master/contributing\r\n[docs tracking issue]: https://github.com/crossplane/docs/issues/new\r\n[document this change]: https://docs.crossplane.io/contribute/contribute\r\n[cheat sheet]: https://github.com/crossplane/crossplane/tree/master/contributing#checklist-cheat-sheet\r\n\n\r\n\r\n\r\n### Description of your changes\r\n\r\n\r\n\r\nWe frequently give this guidance to function authors, but missed actually codifying it.\r\n\r\nI went with SHOULD NOT rather than MUST NOT mutate external systems, mostly because I'm thinking of edge cases where a read operation might bump some innocuous counter or similar. I could be convinced to switch to MUST NOT.\r\n\r\n\r\nI have: \r\n\r\n- [x] Read and followed Crossplane's [contribution process].\r\n- [ ] ~Run `earthly +reviewable` to ensure this PR is ready for review.~\r\n- [ ] ~Added or updated unit tests.~\r\n- [ ] ~Added or updated e2e tests.~\r\n- [ ] ~Linked a PR or a [docs tracking issue] to [document this change].~\r\n- [ ] ~Added `backport release-x.y` labels to auto-backport this PR.~\r\n\r\nNeed help with this checklist? See the [cheat sheet].\r\n\r\n[contribution process]: https://github.com/crossplane/crossplane/tree/master/contributing\r\n[docs tracking issue]: https://github.com/crossplane/docs/issues/new\r\n[document this change]: https://docs.crossplane.io/contribute/contribute\r\n[cheat sheet]: https://github.com/crossplane/crossplane/tree/master/contributing#checklist-cheat-sheet\r\n\nHello there! @negz, you've done cool work about credetials. Thanks, it shoud help me to use secrets. 
Please give me comments about this case. I'm using crossplane 1.16.0 and added this to composition:", + "But after applying claims got this error:" + ] + } + }, + "metadata": { + "tags": [ + "crossplane", + "graduated", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "crossplane" + ], + "targetResourceKinds": [ + "Pod", + "Deployment", + "Secret", + "Namespace" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/crossplane/crossplane/pull/5543", + "repo": "https://github.com/crossplane/crossplane", + "pr": "https://github.com/crossplane/crossplane/pull/5808" + }, + "reactions": 7, + "comments": 3, + "synthesizedBy": "regex", + "qualityScore": 67 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with crossplane installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:25:29.800Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/dapr/dapr-1864-add-windows-containers-to-the-docker-multiarch-manifest.json b/solutions/cncf-generated/dapr/dapr-1864-add-windows-containers-to-the-docker-multiarch-manifest.json new file mode 100644 index 00000000..39be0cf7 --- /dev/null +++ b/solutions/cncf-generated/dapr/dapr-1864-add-windows-containers-to-the-docker-multiarch-manifest.json @@ -0,0 +1,83 @@ +{ + "version": "kc-mission-v1", + "name": "dapr-1864-add-windows-containers-to-the-docker-multiarch-manifest", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "dapr: Add windows containers to the docker multiarch manifest", + "description": "It turns out, you can build a docker multiplatform image across both windows and linux. This means we can deliver a cross platform sidecar injector simply by creating a daprd windows container and letting docker automatically pull the right one based on the os/architecture of the kubernetes node the user app happens to be deployed to.\n\nPrior to this change, we would build each set of binaries separately, then copy them all to one job to do a docker buildx to create the multiplatform images. Sinc", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "It turns out, you can build a docker multiplatform image across both windows and linux. This means we can deliver a cross platform sidecar injector simply by creating a daprd windows container and let" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\ndapr % kubectl set env deployment/dapr-sidecar-injector SIDECAR_IMAGE=docker.io/wcs1only/daprd:edge --namespace=test\r\n\r\ndapr % kubectl describe pod `kubectl get pods --namespace=test | awk '/add/ {print $1}'` --namespace=test | grep -A20 Events\r\nEvents:\r\n Type Reason Age From Message\r\n ---- ------ ---- ---- -------\r\n Normal Scheduled 20s default-scheduler Successfully assigned test/addapp-57c54d5f47-n675g to akswin2000000\r\n Nor\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kubestellar/console-kb/pull/6. Review the changes to understand the solution."
+ }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "# CNCF Mission Generation Report (All Batches)\n\n**Date:** 2026-02-27T17:50:38Z\n**Batches:** 11\n\n---\n## Batch: generation-report-0.md\n# CNCF Mission Generation Report\n\n**Date:** 2026-02-27T17:47:59.472Z\n**Total generated:** 184\n**Skipped (duplicates):** 0\n**Errors:** 0\n\n## Projects Processed\n\n| Project | Maturity | Issues Found | Missions Generated | Errors |\n|---------|----------|-------------|-------------------|--------|\n| argo | graduated | 20 | 20 | 0 |\n| cert-manager | graduated | 17 | 17 | 0 |\n| cilium | graduated | 15 | 15 | 0 |\n| cloudevents | graduated | 3 | 3 | 0 |\n| containerd | graduated | 15 | 15 | 0 |\n| coredns | graduated | 6 | 6 | 0 |\n| cri-o | graduated | 14 | 14 | 0 |\n| crossplane | graduated | 8 | 8 | 0 |\n| cubefs | graduated | 0 | 0 | 0 |\n| dapr | graduated | 8 | 8 | 0 |\n| dragonfly | graduated | 0 | 0 | 0 |\n| envoy | graduated | 16 | 16 | 0 |\n| etcd | graduated | 12 | 12 | 0 |\n| falco | graduated | 9 | 9 | 0 |\n| fluentd | graduated | 2 | 2 | 0 |\n| flux | graduated | 0 | 0 | 0 |\n| harbor | graduated | 7 | 7 | 0 |\n| helm | graduated | 20 | 20 | 0 |\n| in-toto | graduated | 3 | 3 | 0 |\n| istio | graduated | 9 | 9 | 0 |\n\n## Generated Missions\n\n- **argo: feat: Multiple sources for applications** (advanced) — [source](https://github.com/argoproj/argo-cd/pull/10432)\n- **argo: feat: Extra Helm values from external git repo #5826** (advanced) — [source](https://github.com/argoproj/argo-cd/pull/6280)\n- **argo: feat(application-controller): Add support for rollback", + "codeSnippets": [ + "dapr % kubectl set env deployment/dapr-sidecar-injector SIDECAR_IMAGE=docker.io/wcs1only/daprd:edge --namespace=test\r\n\r\ndapr % kubectl describe pod `kubectl get pods --namespace=test | awk '/add/ {print $1}'` --namespace=test | grep -A20 Events\r\nEvents:\r\n Type Reason Age From Message\r\n ---- ------ ---- ---- -------\r\n Normal Scheduled 20s default-scheduler Successfully assigned test/addapp-57c54d5f47-n675g to akswin2000000\r\n Nor", + "dapr % kubectl set env deployment/dapr-sidecar-injector SIDECAR_IMAGE=docker.io/wcs1only/daprd:edge --namespace=test\r\n\r\ndapr % kubectl describe pod `kubectl get pods --namespace=test | awk '/add/ {print $1}'` --namespace=test | grep -A20 Events\r\nEvents:\r\n Type Reason Age From Message\r\n ---- ------ ---- ---- -------\r\n Normal Scheduled 20s default-scheduler Successfully assigned test/addapp-57c54d5f47-n675g to akswin2000000\r\n Normal Pulling 17s kubelet, akswin2000000 Pulling image \"wcs1only/distributed-calculator-go:edge\"\r\n Normal Pulled 16s kubelet, akswin2000000 Successfully pulled image \"wcs1only/distributed-calculator-go:edge\"\r\n Normal Created 16s kubelet, akswin2000000 Created container add\r\n Normal Started 14s kubelet, akswin2000000 Started container add\r\n Normal Pulling 14s kubelet, akswin2000000 Pulling image \"docker.io/wcs1only/daprd:edge\"\r\n Normal Pulled 13s kubelet, akswin2000000 Successfully pulled image \"docker.io/wcs1only/daprd:edge\"\r\n Normal Created 13s kubelet, akswin2000000 Created container daprd\r\n Normal Started 11s kubelet, akswin2000000 Started container daprd\r\ndapr % kubectl logs `kubectl get pods --namespace=test | awk '/addapp/ {print $1}'` add --namespace=test\r\nAdding 5.000000 to 6.000000 on Windows!" 
+ ] + } + }, + "metadata": { + "tags": [ + "dapr", + "graduated", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "dapr" + ], + "targetResourceKinds": [ + "Pod", + "Deployment", + "Job", + "Namespace", + "Node" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/dapr/dapr/pull/1864", + "repo": "https://github.com/dapr/dapr", + "pr": "https://github.com/kubestellar/console-kb/pull/6" + }, + "reactions": 2, + "comments": 10, + "synthesizedBy": "regex", + "qualityScore": 66 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with dapr installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:26:11.391Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/dapr/dapr-962-initial-prometheus-metrics-endpoint.json b/solutions/cncf-generated/dapr/dapr-962-initial-prometheus-metrics-endpoint.json new file mode 100644 index 00000000..89a1803e --- /dev/null +++ b/solutions/cncf-generated/dapr/dapr-962-initial-prometheus-metrics-endpoint.json @@ -0,0 +1,82 @@ +{ + "version": "kc-mission-v1", + "name": "dapr-962-initial-prometheus-metrics-endpoint", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "dapr: Initial Prometheus Metrics Endpoint", + "description": "I've added an initial metrics endpoint using OpenCensus's Prometheus exporter to hook into Dapr's HTTP pipeline. Due to the incompatibility between fasthttp, net/http, and ochttp this is a bit more messy than I'd like. I'll continue to investigate better alternatives.\n\nThis is built on the existing work in #950 \n\nExample `/metrics` endpoint:\n```", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "I've added an initial metrics endpoint using OpenCensus's Prometheus exporter to hook into Dapr's HTTP pipeline. Due to the incompatibility between fasthttp, net/http, and ochttp this is a bit more me" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\napiVersion: dapr.io/v1alpha1\r\n kind: Component\r\n metadata:\r\n name: ratelimit\r\n spec:\r\n type: middleware.http.ratelimit\r\n metadata:\r\n - name: maxRequestsPerSecond\r\n value: 10\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/dapr/components-contrib/pull/193. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "A middleware that can be used to rate limit the number of requests passing through the HTTP pipeline.\n\nExample component\n```yaml\n apiVersion: dapr.io/v1alpha1\n kind: Component\n metadata:\n name: ratelimit\n spec:\n type: middleware.http.ratelimit\n metadata:\n - name: maxRequestsPerSecond\n value: 10\n```\nExample configuration\n```yaml\n apiVersion: dapr.io/v1alpha1\n kind: Configuration\n metadata:\n name: pipeline\n spec:\n httpPipeline:\n handlers:\n - name: ratelimit\n type: middleware.http.ratelimit\n```\n\n_NOTE:_ The middleware will be invoked on any HTTP ingress request to Dapr and therefore this will also throttle the user application's calls.", + "codeSnippets": [ + "apiVersion: dapr.io/v1alpha1\r\n kind: Component\r\n metadata:\r\n name: ratelimit\r\n spec:\r\n type: middleware.http.ratelimit\r\n metadata:\r\n - name: maxRequestsPerSecond\r\n value: 10", + "apiVersion: dapr.io/v1alpha1\r\n kind: Component\r\n metadata:\r\n name: ratelimit\r\n spec:\r\n type: middleware.http.ratelimit\r\n metadata:\r\n - name: maxRequestsPerSecond\r\n value: 10", + "apiVersion: dapr.io/v1alpha1\r\n kind: Configuration\r\n metadata:\r\n name: pipeline\r\n spec:\r\n httpPipeline:\r\n handlers:\r\n - name: ratelimit\r\n type: middleware.http.ratelimit", + "These are just the default ones already supported but as we add others we can optionally add those to the view.\r\n\r\n\n> @youngbupark if we standardize on a Prometheus endpoint (pull) then people can use whatever compatible agents they want to scrape it. Or would you prefer to support other [OC stats exporters](https://opencensus.io/exporters/supported-exporters/go/) which would require both pull and push? This would then require a new \"MetricsExporter\" type I guess.\r\n\r\nWe may need to support both at the end. I agree that in the first iteration, Prometheus can be the default metric backends in Dapr. In the long term, we need to allow user to use the other oc exporter. \r\nSome teams or companies have their internal telemetry backend/instrumentation, but doesn't support prometheus pull model. Then they need to develop scraping agent to premetheus metric directly (like AKS oms agent) or a loader from prometheus to their own telemetry backend(stackdriver-prometheus sidecar). Allowing oc exporter will be more flexible for them. We need to discuss it more. \r\n\r\n> If we just do Prometheus (no other exporters) - do you think it's reasonable to define what metrics you want to expose in the dapr config similar to how the `TracingSpec` works, or would you want to define a component? To me it feels like if we're only supporting Prometheus there is no \"building block\" and we should just have some metrics toggles in the dapr config such as:\r\n> \r\n>", + "> \r\n> These are just the default ones already supported but as we add others we can optionally add those to the view.\r\n\r\nI also prefer defining new CRD config `MetricSpec` similar to `TracingSpec`. but the spec's scheme would be different from what you suggest. Instead of adding `bool` flag in struct, I would group the metrics and add string:string map to be more flexible." 
+ ] + } + }, + "metadata": { + "tags": [ + "dapr", + "graduated", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "dapr" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/dapr/dapr/pull/962", + "repo": "https://github.com/dapr/dapr", + "pr": "https://github.com/dapr/components-contrib/pull/193" + }, + "reactions": 3, + "comments": 20, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with dapr installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:26:00.084Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/distribution/distribution-2973-support-ecs-taskrole-in-s3-storage-driver.json b/solutions/cncf-generated/distribution/distribution-2973-support-ecs-taskrole-in-s3-storage-driver.json new file mode 100644 index 00000000..561a78a2 --- /dev/null +++ b/solutions/cncf-generated/distribution/distribution-2973-support-ecs-taskrole-in-s3-storage-driver.json @@ -0,0 +1,80 @@ +{ + "version": "kc-mission-v1", + "name": "distribution-2973-support-ecs-taskrole-in-s3-storage-driver", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "distribution: Support ECS TaskRole in S3 storage driver", + "description": "Instead of constructing the list of credential providers manually, if we use the default list we can take advantage of the AWS SDK checking the environment and returning either the EC2RoleProvider or the generic HTTP credentials provider, configured to use the ECS credentials endpoint.\n\nAlso, use the `defaults.Config()` function instead of `aws.NewConfig()`, as this results in an initialised HTTP client which prevents a fatal error when retrieving credentials from the ECS credentials endpoint.", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Instead of constructing the list of credential providers manually, if we use the default list we can take advantage of the AWS SDK checking the environment and returning either the EC2RoleProvider or " + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nstorage:\r\n s3:\r\n accesskey: ABCDEFG123HY4LWA5TOG\r\n secretkey: AB1C+D2EF/Ge3cA/vQbUfMVpuDImUZsxkpCQDUy0\r\n region: us-east-1\r\n ...\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/distribution/distribution/pull/3245. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "This PR reopens #2973 and fixes still hardcoded list of credential providers:\n\n- `StaticProvider`\n- `EnvVarProvider`\n- `SharedCredentialProvider`\n- `EC2RoleProvider`\n\nInstead it relies on SDK defaults when creating a new AWS Config and AWS Session. 
And only overrides them with `StaticProvider` if `accesskey` and `secretkey` are provided via `config.yml`:\n\n```yaml\nstorage:\n s3:\n accesskey: ABCDEFG123HY4LWA5TOG\n secretkey: AB1C+D2EF/Ge3cA/vQbUfMVpuDImUZsxkpCQDUy0\n region: us-east-1\n ...\n``` \n\nAs the result, it uses AssumeRoleWithWebIdentity credential provider (in [`session.resolveCredentials`](https://github.com/aws/aws-sdk-go/blob/95871fc3b42a8910f81ec01c74975293dafced97/aws/session/session.go#L630)) that resolves assigned IAM role to K8s Service Accounts via OIDC token. Please see [here](https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/) for more details.\n\nThis changed have been tested in the following scenario to grant access to AWS S3 Bucket:\n\n- IAM Role for Service Accounts that're assigned to Pods on EKS cluster\n- IAM Role for EC2 instances\n- Environment Variables: `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` on a local machine\n\nUpstreamed from Docker Hub", + "codeSnippets": [ + "storage:\r\n s3:\r\n accesskey: ABCDEFG123HY4LWA5TOG\r\n secretkey: AB1C+D2EF/Ge3cA/vQbUfMVpuDImUZsxkpCQDUy0\r\n region: us-east-1\r\n ...", + "storage:\r\n s3:\r\n accesskey: ABCDEFG123HY4LWA5TOG\r\n secretkey: AB1C+D2EF/Ge3cA/vQbUfMVpuDImUZsxkpCQDUy0\r\n region: us-east-1\r\n ..." + ] + } + }, + "metadata": { + "tags": [ + "distribution", + "sandbox", + "app-definition", + "troubleshoot", + "area-storage-s3" + ], + "cncfProjects": [ + "distribution" + ], + "targetResourceKinds": [ + "Role" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/distribution/distribution/pull/2973", + "repo": "https://github.com/distribution/distribution", + "pr": "https://github.com/distribution/distribution/pull/3245" + }, + "reactions": 5, + "comments": 16, + "synthesizedBy": "regex", + "qualityScore": 60 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with distribution installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:40:15.981Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/envoy/envoy-36369-tls-support-for-ecdsa-p-384-and-p-521-certificates.json b/solutions/cncf-generated/envoy/envoy-36369-tls-support-for-ecdsa-p-384-and-p-521-certificates.json new file mode 100644 index 00000000..19cf7c04 --- /dev/null +++ b/solutions/cncf-generated/envoy/envoy-36369-tls-support-for-ecdsa-p-384-and-p-521-certificates.json @@ -0,0 +1,77 @@ +{ + "version": "kc-mission-v1", + "name": "envoy-36369-tls-support-for-ecdsa-p-384-and-p-521-certificates", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "envoy: tls: support for ECDSA P-384 and P-521 certificates", + "description": "Commit Message: tls: support for ECDSA P-384 and P-521 certificates (#10855)\n\nAdditional Description: Commercial National Security Algorithm Suite (CNSA) requires ECDSA keys be specified with P-384 curves. The assertion that there are [no security benefits to curves higher than P-256](https://github.com/envoyproxy/envoy/pull/5224#issue-387770091) is no longer true. 
This change is intended to limit the adoptable curves to P-384 and P-521.\n\nRisk Level: Medium - removal of limitation of curves to b", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Commit Message: tls: support for ECDSA P-384 and P-521 certificates (#10855)\n\nAdditional Description: Commercial National Security Algorithm Suite (CNSA) requires ECDSA keys be specified with P-384 cu" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n---\r\nadmin:\r\n address:\r\n socket_address:\r\n address: 127.0.0.1\r\n port_value: 9901\r\nstatic_resources:\r\n listeners:\r\n - name: listener_0\r\n address:\r\n socket_address:\r\n address: 127.0.0.1\r\n port_value: 10000\r\n filter_chains:\r\n - filters:\r\n - name: envoy.filters.network.http_connection_manager\r\n typed_config:\r\n \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.Http\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "I removed the fixed issue number from the title because otherwise when this is merged, it'll have two numbers in parentheses where the PR number usually is. The fixed issue number is part of the body of the commit message.", + "codeSnippets": [ + "---\r\nadmin:\r\n address:\r\n socket_address:\r\n address: 127.0.0.1\r\n port_value: 9901\r\nstatic_resources:\r\n listeners:\r\n - name: listener_0\r\n address:\r\n socket_address:\r\n address: 127.0.0.1\r\n port_value: 10000\r\n filter_chains:\r\n - filters:\r\n - name: envoy.filters.network.http_connection_manager\r\n typed_config:\r\n \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.Http", + "---\r\nadmin:\r\n address:\r\n socket_address:\r\n address: 127.0.0.1\r\n port_value: 9901\r\nstatic_resources:\r\n listeners:\r\n - name: listener_0\r\n address:\r\n socket_address:\r\n address: 127.0.0.1\r\n port_value: 10000\r\n filter_chains:\r\n - filters:\r\n - name: envoy.filters.network.http_connection_manager\r\n typed_config:\r\n \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\r\n stat_prefix: ingress_http\r\n codec_type: AUTO\r\n route_config:\r\n name: local_route\r\n virtual_hosts:\r\n - name: local_service\r\n domains:\r\n - \"*\"\r\n routes:\r\n - match:\r\n prefix: /\r\n route:\r\n cluster: some_service\r\n http_filters:\r\n - name: envoy.filters.http.router\r\n typed_config:\r\n \"@type\": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\r\n transport_socket:\r\n name: envoy.transport_sockets.tls\r\n typed_config:\r\n \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext\r\n common_tls_context:\r\n tls_certificates:\r\n - certificate_chain: {filename: \"test/common/tls/test_data/selfsigned_ecdsa_p384_cert.pem\"}\r\n private_key: {filename: \"test/common/tls/test_data/selfsigned_ecdsa_p384_key.pem\"}\r\n clusters:\r\n - name: some_service\r\n connect_timeout: 0.25s\r\n type: STATIC\r\n lb_policy: ROUND_ROBIN\r\n load_assignment:\r\n cluster_name: some_service\r\n endpoints:\r\n - lb_endpoints:\r\n - endpoint:\r\n address:\r\n socket_address:\r\n address: 127.0.0.1\r\n port_value: 1234", + "Connecting to 127.0.0.1\r\nCONNECTED(00000003)\r\nCan't use 
SSL_get_servername\r\ndepth=0 C=US, ST=California, L=San Francisco, O=Lyft, OU=Lyft Engineering, CN=Test Server\r\nverify error:num=18:self-signed certificate\r\nverify return:1\r\ndepth=0 C=US, ST=California, L=San Francisco, O=Lyft, OU=Lyft Engineering, CN=Test Server\r\nverify return:1\r\n---\r\nCertificate chain\r\n 0 s:C=US, ST=California, L=San Francisco, O=Lyft, OU=Lyft Engineering, CN=Test Server\r\n i:C=US, ST=California, L=San Francisco, O=Lyft, OU=Lyft Engineering, CN=Test Server\r\n a:PKEY: id-ecPublicKey, 384 (bit); sigalg: ecdsa-with-SHA256\r\n v:NotBefore: Aug 21 19:14:10 2024 GMT; NotAfter: Aug 21 19:14:10 2026 GMT\r\n---\r\nServer certificate\r\n-----BEGIN CERTIFICATE-----\r\nMIIC0jCCAlegAwIBAgIUUv13YuIFYMJxp1t4z8Z7H0cFdHowCgYIKoZIzj0EAwIw\r\nejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDVNh\r\nbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5naW5l\r\nZXJpbmcxFDASBgNVBAMMC1Rlc3QgU2VydmVyMB4XDTI0MDgyMTE5MTQxMFoXDTI2\r\nMDgyMTE5MTQxMFowejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWEx\r\nFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsM\r\nEEx5ZnQgRW5naW5lZXJpbmcxFDASBgNVBAMMC1Rlc3QgU2VydmVyMHYwEAYHKoZI\r\nzj0CAQYFK4EEACIDYgAEtFQWaGrCFUT70YVGv9IA0H1d/fUGdoATjqAQlgOnzWf4\r\nFcJIqRQ8dGJ0wom/p8b/3MrKpy8wpWBnAo2C9+9owGdOqcqSIFLVV0iaGogKhIAx\r\n7KAjWoMEpal4uNnaYLlCo4GdMIGaMAwGA1UdEwEB/wQCMAAwCwYDVR0PBAQDAgXg\r\nMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATAeBgNVHREEFzAVghNzZXJ2\r\nZXIxLmV4YW1wbGUuY29tMB0GA1UdDgQWBBQ23kFgk8ELq1P0xW3R8SYRwJRcyjAf\r\nBgNVHSMEGDAWgBQ23kFgk8ELq1P0xW3R8SYRwJRcyjAKBggqhkjOPQQDAgNpADBm\r\nAjEA6FC5eEaKcV7i9AUuVsIJruDKqLVmSLKzHX+DVxOvaxQcTuKMwtg8AuTq1qq+\r\nMZ8EAjEA3JKxxjQAp2hi2gvSUGXQqk3seETImDNmUdWXmYcohDRM36KKJORqXoui\r\njD+/8ipt\r\n-----END CERTIFICATE-----\r\nsubject=C=US, ST=California, L=San Francisco, O=Lyft, OU=Lyft Engineering, CN=Test Server\r\nissuer=C=US, ST=California, L=San Francisco, O=Lyft, OU=Lyft Engineering, CN=Test Server\r\n---\r\nNo client certificate CA names sent\r\nPeer signing digest: SHA384\r\nPeer signature type: ECDSA\r\nServer Temp Key: X25519, 253 bits\r\n---\r\nSSL handshake has read 1062 bytes and written 379 bytes\r\nVerification error: self-signed certificate\r\n---\r\nNew, TLSv1.3, Cipher is TLS_AES_256_GCM_SHA384\r\nServer public key is 384 bit\r\nThis TLS version forbids renegotiation.\r\nCompression: NONE\r\nExpansion: NONE\r\nNo ALPN negotiated\r\nEarly data was not sent\r\nVerify return code: 18 (self-signed certificate)\r\n---" + ] + } + }, + "metadata": { + "tags": [ + "envoy", + "graduated", + "networking", + "troubleshoot", + "deps" + ], + "cncfProjects": [ + "envoy" + ], + "targetResourceKinds": [ + "Service", + "Ingress" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/envoyproxy/envoy/pull/36369", + "repo": "https://github.com/envoyproxy/envoy" + }, + "reactions": 4, + "comments": 23, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with envoy installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:26:31.264Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/etcd/etcd-11375-etcdserver-fix-watch-metrics.json b/solutions/cncf-generated/etcd/etcd-11375-etcdserver-fix-watch-metrics.json new file mode 100644 index 00000000..c0243124 --- /dev/null +++ b/solutions/cncf-generated/etcd/etcd-11375-etcdserver-fix-watch-metrics.json @@ -0,0 +1,80 @@ +{ + "version": "kc-mission-v1", + "name": "etcd-11375-etcdserver-fix-watch-metrics", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "etcd: etcdserver: fix watch metrics", + "description": "Currently, when a client closes context during watch we pass. `codes.Unavailable` to `status.New()` via `rpctypes.ErrGRPCNoLeader`[1],[2] this inadvertently registers `Unavailable` in Prometheus metrics which causes an issue as `Unavailable` indicates the service is currently unavailable [3]. This PR changes the logic for how we conclude the leader is lost by observing `RaftStatusGetter.Leader()`[4] for `raft.None`. Only then do we return Unavailable (no leader) otherwise Canceled.\n\n[1] https://", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Currently, when a client closes context during watch we pass. `codes.Unavailable` to `status.New()` via `rpctypes.ErrGRPCNoLeader`[1],[2] this inadvertently registers `Unavailable` in Prometheus metri" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n| [Impacted Files](https://codecov.io/gh/etcd-io/etcd/pull/11375?src=pr&el=tree) | Coverage Δ | |\n|---|---|---|\n| [etcdserver/api/v3rpc/rpctypes/error.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-ZXRjZHNlcnZlci9hcGkvdjNycGMvcnBjdHlwZXMvZXJyb3IuZ28=) | `90.47% <ø> (ø)` | :arrow_up: |\n| [etcdserver/api/v3rpc/watch.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-ZXRjZHNlcnZlci9hcGkvdjNycGMvd2F0Y2guZ28=) | `80.06% <100%> (+1.63%)` | :arrow_up:\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/etcd-io/etcd/pull/12196. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Before this patch, a client which cancels the context for a watch results in the\nserver generating a `rpctypes.ErrGRPCNoLeader` error that leads the recording of\na gRPC `Unavailable` metric in association with the client watch cancellation.\nThe metric looks like this:\n\n grpc_server_handled_total{grpc_code=\"Unavailable\",grpc_method=\"Watch\",grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"}\n\nSo, the watch server has misidentified the error as a server error and then\npropagates the mistake to metrics, leading to a false indicator that the leader\nhas been lost. This false signal then leads to false alerting.\n\nThe commit 9c103dd0dedfc723cd4f33b6a5e81343d8a6bae7 introduced an interceptor which wraps\nwatch streams requiring a leader, causing those streams to be actively canceled\nwhen leader loss is detected.\n\nHowever, the error handling code assumes all stream context cancellations are\nfrom the interceptor. 
This assumption is broken when the context was canceled\nbecause of a client stream cancelation.\n\nThe core challenge is lack of information conveyed via `context.Context` which\nis shared by both the send and receive sides of the stream handling and is\nsubject to cancellation by all paths (including the gRPC library itself). If any\npiece of the system cancels the shared context, there's no way for a context\nconsumer to understand who cancelled the context or why.\n\nTo solve the ambiguity of the stream interceptor code specifically, this patch\nintroduces a custom context s", + "codeSnippets": [ + "| [Impacted Files](https://codecov.io/gh/etcd-io/etcd/pull/11375?src=pr&el=tree) | Coverage Δ | |\n|---|---|---|\n| [etcdserver/api/v3rpc/rpctypes/error.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-ZXRjZHNlcnZlci9hcGkvdjNycGMvcnBjdHlwZXMvZXJyb3IuZ28=) | `90.47% <ø> (ø)` | :arrow_up: |\n| [etcdserver/api/v3rpc/watch.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-ZXRjZHNlcnZlci9hcGkvdjNycGMvd2F0Y2guZ28=) | `80.06% <100%> (+1.63%)` | :arrow_up:", + "| [Impacted Files](https://codecov.io/gh/etcd-io/etcd/pull/11375?src=pr&el=tree) | Coverage Δ | |\n|---|---|---|\n| [etcdserver/api/v3rpc/rpctypes/error.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-ZXRjZHNlcnZlci9hcGkvdjNycGMvcnBjdHlwZXMvZXJyb3IuZ28=) | `90.47% <ø> (ø)` | :arrow_up: |\n| [etcdserver/api/v3rpc/watch.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-ZXRjZHNlcnZlci9hcGkvdjNycGMvd2F0Y2guZ28=) | `80.06% <100%> (+1.63%)` | :arrow_up: |\n| [etcdserver/api/v3rpc/lease.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-ZXRjZHNlcnZlci9hcGkvdjNycGMvbGVhc2UuZ28=) | `67.04% <0%> (-7.96%)` | :arrow_down: |\n| [auth/store.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-YXV0aC9zdG9yZS5nbw==) | `44.82% <0%> (-2.56%)` | :arrow_down: |\n| [lease/leasehttp/http.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-bGVhc2UvbGVhc2VodHRwL2h0dHAuZ28=) | `64.23% <0%> (-1.46%)` | :arrow_down: |\n| [etcdserver/api/v2http/client.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-ZXRjZHNlcnZlci9hcGkvdjJodHRwL2NsaWVudC5nbw==) | `84.3% <0%> (-1.21%)` | :arrow_down: |\n| [pkg/proxy/server.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-cGtnL3Byb3h5L3NlcnZlci5nbw==) | `60.2% <0%> (-1.02%)` | :arrow_down: |\n| [etcdserver/v3\\_server.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-ZXRjZHNlcnZlci92M19zZXJ2ZXIuZ28=) | `72.86% <0%> (-0.86%)` | :arrow_down: |\n| [mvcc/watchable\\_store.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-bXZjYy93YXRjaGFibGVfc3RvcmUuZ28=) | `82.51% <0%> (-0.7%)` | :arrow_down: |\n| [mvcc/metrics\\_txn.go](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree#diff-bXZjYy9tZXRyaWNzX3R4bi5nbw==) | `100% <0%> (ø)` | :arrow_up: |\n| ... and [20 more](https://codecov.io/gh/etcd-io/etcd/pull/11375/diff?src=pr&el=tree-more) | |\n\n------\n\n[Continue to review full report at Codecov](https://codecov.io/gh/etcd-io/etcd/pull/11375?src=pr&el=continue).\n> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)\n> `Δ = absolute (impact)`, `ø = not affected`, `? = missing data`\n> Powered by [Codecov](https://codecov.io/gh/etcd-io/etcd/pull/11375?src=pr&el=footer). 
Last update [ec52217...91042e2](https://codecov.io/gh/etcd-io/etcd/pull/11375?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).\n\n/cc @brancz \n/cc @jingyih @gyuho PTAL\nI have very little idea about the code changes, they look fine to me but I really don’t know the code very well. If it does what’s promised then I’m extremely excited to finally turn the alerts back on! :)\n@gyuho would you mind taking a peek please:).\n@xiang90 would you mind looking please.\nHi guys, any update on this? Thanks in advance!\nbump @hexfusion. There are TODOs on this PR from @xiang90’s feedback.\nI hope to get back to this soon.\n# [Codecov](https://codecov.io/gh/etcd-io/etcd/pull/11375?src=pr&el=h1) Report\n> Merging [#11375](https://codecov.io/gh/etcd-io/etcd/pull/11375?src=pr&el=desc) into [master](https://codecov.io/gh/etcd-io/etcd/commit/63dd73c1869f1784f907b922f61571176a2802e8&el=desc) will **decrease** coverage by `0.64%`.\n> The diff coverage is `100.00%`.\n\n[![Impacted file tree graph](https://codecov.io/gh/etcd-io/etcd/pull/11375/graphs/tree.svg?width=650&height=150&src=pr&token=so7nNovJo3)](https://codecov.io/gh/etcd-io/etcd/pull/11375?src=pr&el=tree)" + ] + } + }, + "metadata": { + "tags": [ + "etcd", + "graduated", + "orchestration", + "troubleshoot", + "backport-v3-4" + ], + "cncfProjects": [ + "etcd" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/etcd-io/etcd/pull/11375", + "repo": "https://github.com/etcd-io/etcd", + "pr": "https://github.com/etcd-io/etcd/pull/12196" + }, + "reactions": 3, + "comments": 15, + "synthesizedBy": "regex", + "qualityScore": 64 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with etcd installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:27:06.602Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/etcd/etcd-11776-fix-cluster-peer-http-srv-discovery-when-no-https-records-exist.json b/solutions/cncf-generated/etcd/etcd-11776-fix-cluster-peer-http-srv-discovery-when-no-https-records-exist.json new file mode 100644 index 00000000..2e9455f9 --- /dev/null +++ b/solutions/cncf-generated/etcd/etcd-11776-fix-cluster-peer-http-srv-discovery-when-no-https-records-exist.json @@ -0,0 +1,77 @@ +{ + "version": "kc-mission-v1", + "name": "etcd-11776-fix-cluster-peer-http-srv-discovery-when-no-https-records-exist", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "etcd: Fix cluster peer HTTP SRV discovery when no HTTPS records exist", + "description": "embed: Fix cluster peer HTTP SRV discovery\n\nFixed issue where peer SRV discovery failed if no HTTPS endpoints were discovered. HTTP endpoints were never added to the address list due to a bad error check, and the `_etcd-server-ssl._tcp.` failure masked the subsequent success of lookups for `_etcd-server._tcp.`", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "embed: Fix cluster peer HTTP SRV discovery\n\nFixed issue where peer SRV discovery failed if no HTTPS endpoints were discovered. 
HTTP endpoints were never added to the address list due to a bad error ch" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n| [Impacted Files](https://codecov.io/gh/etcd-io/etcd/pull/11776?src=pr&el=tree) | Coverage Δ | |\n|---|---|---|\n| [embed/config.go](https://codecov.io/gh/etcd-io/etcd/pull/11776/diff?src=pr&el=tree#diff-ZW1iZWQvY29uZmlnLmdv) | `54.12% <0.00%> (ø)` | |\n| [auth/store.go](https://codecov.io/gh/etcd-io/etcd/pull/11776/diff?src=pr&el=tree#diff-YXV0aC9zdG9yZS5nbw==) | `58.84% <0.00%> (-19.53%)` | :arrow_down: |\n| [pkg/netutil/netutil.go](https://codecov.io/gh/etcd-io/etcd/pull/11776/diff?src=pr&el=tre\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kubestellar/console-kb/pull/6. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "# CNCF Mission Generation Report (All Batches)\n\n**Date:** 2026-02-27T17:50:38Z\n**Batches:** 11\n\n---\n## Batch: generation-report-0.md\n# CNCF Mission Generation Report\n\n**Date:** 2026-02-27T17:47:59.472Z\n**Total generated:** 184\n**Skipped (duplicates):** 0\n**Errors:** 0\n\n## Projects Processed\n\n| Project | Maturity | Issues Found | Missions Generated | Errors |\n|---------|----------|-------------|-------------------|--------|\n| argo | graduated | 20 | 20 | 0 |\n| cert-manager | graduated | 17 | 17 | 0 |\n| cilium | graduated | 15 | 15 | 0 |\n| cloudevents | graduated | 3 | 3 | 0 |\n| containerd | graduated | 15 | 15 | 0 |\n| coredns | graduated | 6 | 6 | 0 |\n| cri-o | graduated | 14 | 14 | 0 |\n| crossplane | graduated | 8 | 8 | 0 |\n| cubefs | graduated | 0 | 0 | 0 |\n| dapr | graduated | 8 | 8 | 0 |\n| dragonfly | graduated | 0 | 0 | 0 |\n| envoy | graduated | 16 | 16 | 0 |\n| etcd | graduated | 12 | 12 | 0 |\n| falco | graduated | 9 | 9 | 0 |\n| fluentd | graduated | 2 | 2 | 0 |\n| flux | graduated | 0 | 0 | 0 |\n| harbor | graduated | 7 | 7 | 0 |\n| helm | graduated | 20 | 20 | 0 |\n| in-toto | graduated | 3 | 3 | 0 |\n| istio | graduated | 9 | 9 | 0 |\n\n## Generated Missions\n\n- **argo: feat: Multiple sources for applications** (advanced) — [source](https://github.com/argoproj/argo-cd/pull/10432)\n- **argo: feat: Extra Helm values from external git repo #5826** (advanced) — [source](https://github.com/argoproj/argo-cd/pull/6280)\n- **argo: feat(application-controller): Add support for rollback", + "codeSnippets": [ + "| [Impacted Files](https://codecov.io/gh/etcd-io/etcd/pull/11776?src=pr&el=tree) | Coverage Δ | |\n|---|---|---|\n| [embed/config.go](https://codecov.io/gh/etcd-io/etcd/pull/11776/diff?src=pr&el=tree#diff-ZW1iZWQvY29uZmlnLmdv) | `54.12% <0.00%> (ø)` | |\n| [auth/store.go](https://codecov.io/gh/etcd-io/etcd/pull/11776/diff?src=pr&el=tree#diff-YXV0aC9zdG9yZS5nbw==) | `58.84% <0.00%> (-19.53%)` | :arrow_down: |\n| [pkg/netutil/netutil.go](https://codecov.io/gh/etcd-io/etcd/pull/11776/diff?src=pr&el=tre", + "| [Impacted Files](https://codecov.io/gh/etcd-io/etcd/pull/11776?src=pr&el=tree) | Coverage Δ | |\n|---|---|---|\n| [embed/config.go](https://codecov.io/gh/etcd-io/etcd/pull/11776/diff?src=pr&el=tree#diff-ZW1iZWQvY29uZmlnLmdv) | `54.12% <0.00%> (ø)` | |\n| [auth/store.go](https://codecov.io/gh/etcd-io/etcd/pull/11776/diff?src=pr&el=tree#diff-YXV0aC9zdG9yZS5nbw==) | `58.84% <0.00%> 
(-19.53%)` | :arrow_down: |\n| [pkg/netutil/netutil.go](https://codecov.io/gh/etcd-io/etcd/pull/11776/diff?src=pr&el=tree#diff-cGtnL25ldHV0aWwvbmV0dXRpbC5nbw==) | `61.47% <0.00%> (-7.38%)` | :arrow_down: |\n| [clientv3/namespace/watch.go](https://codecov.io/gh/etcd-io/etcd/pull/11776/diff?src=pr&el=tree#diff-Y2xpZW50djMvbmFtZXNwYWNlL3dhdGNoLmdv) | `87.87% <0.00%> (-6.07%)` | :arrow_down: |\n| [etcdserver/api/v3rpc/lease.go](https://codecov.io/gh/etcd-io/etcd/pull/11776/diff?src=pr&el=tree#diff-ZXRjZHNlcnZlci9hcGkvdjNycGMvbGVhc2UuZ28=) | `77.21% <0.00%> (-5.07%)` | :arrow_down: |\n| [proxy/grpcproxy/watcher.go](https://codecov.io/gh/etcd-io/etcd/pull/11776/diff?src=pr&el=tree#diff-cHJveHkvZ3JwY3Byb3h5L3dhdGNoZXIuZ28=) | `89.79% <0.00%> (-4.09%)` | :arrow_down: |\n| [clientv3/leasing/cache.go](https://codecov.io/gh/etcd-io/etcd/pull/11776/diff?src=pr&el=tree#diff-Y2xpZW50djMvbGVhc2luZy9jYWNoZS5nbw==) | `87.77% <0.00%> (-3.89%)` | :arrow_down: |\n| [pkg/testutil/recorder.go](https://codecov.io/gh/etcd-io/etcd/pull/11776/diff?src=pr&el=tree#diff-cGtnL3Rlc3R1dGlsL3JlY29yZGVyLmdv) | `77.77% <0.00%> (-3.71%)` | :arrow_down: |\n| [etcdserver/util.go](https://codecov.io/gh/etcd-io/etcd/pull/11776/diff?src=pr&el=tree#diff-ZXRjZHNlcnZlci91dGlsLmdv) | `95.23% <0.00%> (-3.58%)` | :arrow_down: |\n| [clientv3/leasing/txn.go](https://codecov.io/gh/etcd-io/etcd/pull/11776/diff?src=pr&el=tree#diff-Y2xpZW50djMvbGVhc2luZy90eG4uZ28=) | `88.09% <0.00%> (-3.18%)` | :arrow_down: |\n| ... and [18 more](https://codecov.io/gh/etcd-io/etcd/pull/11776/diff?src=pr&el=tree-more) | |\n\n------\n\n[Continue to review full report at Codecov](https://codecov.io/gh/etcd-io/etcd/pull/11776?src=pr&el=continue).\n> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)\n> `Δ = absolute (impact)`, `ø = not affected`, `? = missing data`\n> Powered by [Codecov](https://codecov.io/gh/etcd-io/etcd/pull/11776?src=pr&el=footer). Last update [59f5fb2...6f40c1b](https://codecov.io/gh/etcd-io/etcd/pull/11776?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).\n\nThanks a lot for this fix @brandond .\r\n\r\nCan someone from the team please merge this change? Thanks in advance.\nYeah, we also really need this PR merged so we can finally upgrade our cluster.\nIs there anything I can do to help move this forward? I realize pulling in multierr might be controversial, I'm open to alternate approaches to wrapping or combining the errors.\n@brandond have you been able to find a workaround in the meantime? Or did you endup just using your own fork?\n@trivigy yeah I'm just running off a local fork.\n@gyuho could you (or another maintainer) maybe have a look? We finally want to upgrade.\nThis issue has been automatically marked as stale because it has not had recent activity. It will be closed after 21 days if no further activity occurs. Thank you for your contributions.\n\nNot stale\nRebased and requested changes made. I'll take a look at how to best mock the SRV responses for a proper test.\nThank you for the iteration. \r\n\r\n* Please run ./script/fix.sh, as go.mod -> go.sum went out of sync, and that's the reason why the test fails. \r\n* Is it possible to add a regression test for this scenario ? \r\n\n@ptabor go.sum should be fixed now. I added some tests for both sections of the codebase that I touched; hopefully that looks good? I'm not sure how to get codecov to update.\nThank you for fix. \r\nDon't worry about code cov. 
\r\n[edit: In ~30min there should be link at the end of this test output: https://travis-ci.com/github/etcd-io/etcd/jobs/479950649]\r\n\r\nHopefully last thing to update: There is inconsistency in go.mod's that break PASSES=\"fmt\" ./test.sh" + ] + } + }, + "metadata": { + "tags": [ + "etcd", + "graduated", + "orchestration", + "troubleshoot" + ], + "cncfProjects": [ + "etcd" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/etcd-io/etcd/pull/11776", + "repo": "https://github.com/etcd-io/etcd", + "pr": "https://github.com/kubestellar/console-kb/pull/6" + }, + "reactions": 9, + "comments": 16, + "synthesizedBy": "regex", + "qualityScore": 60 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with etcd installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:26:51.953Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/etcd/etcd-12196-etcdserver-fix-incorrect-metrics-generated-when-clients-cancel-watche.json b/solutions/cncf-generated/etcd/etcd-12196-etcdserver-fix-incorrect-metrics-generated-when-clients-cancel-watche.json new file mode 100644 index 00000000..6c23aa17 --- /dev/null +++ b/solutions/cncf-generated/etcd/etcd-12196-etcdserver-fix-incorrect-metrics-generated-when-clients-cancel-watche.json @@ -0,0 +1,79 @@ +{ + "version": "kc-mission-v1", + "name": "etcd-12196-etcdserver-fix-incorrect-metrics-generated-when-clients-cancel-watche", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "etcd: etcdserver: fix incorrect metrics generated when clients cancel watches", + "description": "Before this patch, a client which cancels the context for a watch results in the\nserver generating a `rpctypes.ErrGRPCNoLeader` error that leads the recording of\na gRPC `Unavailable` metric in association with the client watch cancellation.\nThe metric looks like this:\n\n grpc_server_handled_total{grpc_code=\"Unavailable\",grpc_method=\"Watch\",grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"}\n\nSo, the watch server has misidentified the error as a server error and then\npropagates the mista", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Before this patch, a client which cancels the context for a watch results in the\nserver generating a `rpctypes.ErrGRPCNoLeader` error that leads the recording of\na gRPC `Unavailable` metric in associa" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nThe following portion of [watch.go](https://github.com/etcd-io/etcd/blob/master/etcdserver/api/v3rpc/watch.go#L200) exhibits a race condition in stream error handling which seems to intermittently obfuscate the client/server nature of the error. Annotations inline:\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/etcd-io/etcd/pull/11375. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "Currently, when a client closes context during watch we pass. `codes.Unavailable` to `status.New()` via `rpctypes.ErrGRPCNoLeader`[1],[2] this inadvertently registers `Unavailable` in Prometheus metrics which causes an issue as `Unavailable` indicates the service is currently unavailable [3]. This PR changes the logic for how we conclude the leader is lost by observing `RaftStatusGetter.Leader()`[4] for `raft.None`. Only then do we return Unavailable (no leader) otherwise Canceled.\n\n[1] https://github.com/etcd-io/etcd/pull/11375/files#diff-8a4ebdea7c0a8a8926fca73c3058b0b9L200\n[2] - https://github.com/etcd-io/etcd/blob/0fb26df249f1cd4982c49ef125a3b313dfbde7d6/etcdserver/api/v3rpc/rpctypes/error.go#L68\n[3] https://github.com/grpc/grpc-go/blob/master/codes/codes.go#L140\n[4] - https://github.com/etcd-io/etcd/blob/bbe1e78e6242a57d54c4b96d8c49ea1e094c3cbb/etcdserver/server.go#L1907", + "codeSnippets": [ + "The following portion of [watch.go](https://github.com/etcd-io/etcd/blob/master/etcdserver/api/v3rpc/watch.go#L200) exhibits a race condition in stream error handling which seems to intermittently obfuscate the client/server nature of the error. Annotations inline:", + "The following portion of [watch.go](https://github.com/etcd-io/etcd/blob/master/etcdserver/api/v3rpc/watch.go#L200) exhibits a race condition in stream error handling which seems to intermittently obfuscate the client/server nature of the error. Annotations inline:" + ] + } + }, + "metadata": { + "tags": [ + "etcd", + "graduated", + "orchestration", + "troubleshoot" + ], + "cncfProjects": [ + "etcd" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/etcd-io/etcd/pull/12196", + "repo": "https://github.com/etcd-io/etcd", + "pr": "https://github.com/etcd-io/etcd/pull/11375" + }, + "reactions": 5, + "comments": 19, + "synthesizedBy": "regex", + "qualityScore": 60 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with etcd installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:27:00.939Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/external-secrets/external-secrets-3603-feat-add-bitwarden-secret-manager-support.json b/solutions/cncf-generated/external-secrets/external-secrets-3603-feat-add-bitwarden-secret-manager-support.json new file mode 100644 index 00000000..1843b528 --- /dev/null +++ b/solutions/cncf-generated/external-secrets/external-secrets-3603-feat-add-bitwarden-secret-manager-support.json @@ -0,0 +1,82 @@ +{ + "version": "kc-mission-v1", + "name": "external-secrets-3603-feat-add-bitwarden-secret-manager-support", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "external-secrets: feat: add bitwarden secret manager support", + "description": "## Problem Statement\n\nBitwarden Secret Manager client.\n\n## Related Issue\n\nFixes https://github.com/external-secrets/external-secrets/issues/2661\n\n## Proposed Changes\n\nHow do you like to solve the issue and why?\n\n## Checklist", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "## Problem Statement\n\nBitwarden Secret Manager client.\n\n## Related Issue\n\nFixes https://github.com/external-secrets/external-secrets/issues/2661\n\n## Proposed Changes\n\nHow do you like to solve the issu" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\napiVersion: external-secrets.io/v1beta1\r\nkind: ExternalSecret\r\nmetadata:\r\n name: bitwarden\r\nspec:\r\n refreshInterval: 10s\r\n secretStoreRef:\r\n # This name must match the metadata.name in the `SecretStore`\r\n name: bitwarden-secretsmanager\r\n kind: SecretStore\r\n data:\r\n - secretKey: test\r\n remoteRef:\r\n key: \"test2\"\r\n property: \"f5847eef-2f89-43bc-885a-b18a01178e3e\"\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/tropnikovvl/renovate-demo/pull/31. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "> ℹ️ **Note**\n> \n> This PR body was truncated due to platform limits.\n\nThis PR contains the following updates:\n\n| Package | Update | Change |\n|---|---|---|\n| [external-dns](https://bitnami.com) ([source](https://redirect.github.com/bitnami/charts/tree/HEAD/bitnami/external-dns)) | minor | `7.0.0` → `7.5.7` |\n| [external-secrets/external-secrets](https://redirect.github.com/external-secrets/external-secrets) | minor | `v0.9.0` → `v0.20.4` |\n\n---\n\n### [`v7.5.7`](https://redirect.github.com/bitnami/charts/blob/HEAD/bitnami/external-dns/CHANGELOG.md#small757-2024-06-18-small)\n\n- \\[bitnami/external-dns] Release 7.5.7 ([#​27341](https://redirect.github.com/bitnami/charts/issues/27341)) ([6668b3c](https://redirect.github.com/bitnami/charts/commit/6668b3c1ae632eb90b6e825fedddc39637aec137)), closes [#​27341](https://redirect.github.com/bitnami/charts/issues/27341)\n\n### [`v7.5.6`](https://redirect.github.com/bitnami/charts/blob/HEAD/bitnami/external-dns/CHANGELOG.md#small756-2024-06-17-small)\n\n- \\[bitnami/external-dns] Release 7.5.6 ([#​27216](https://redirect.github.com/bitnami/charts/issues/27216)) ([cfe95c9](https://redirect.github.com/bitnami/charts/commit/cfe95c9c1d2c0e6b8e3f1b574f992f47486bc91a)), closes [#​27216](https://redirect.github.com/bitnami/charts/issues/27216)\n\n### [`v7.5.5`](https://redirect.github.com/bitnami/charts/blob/HEAD/bitnami/external-dns/CHANGELOG.md#small75", + "codeSnippets": [ + "apiVersion: external-secrets.io/v1beta1\r\nkind: ExternalSecret\r\nmetadata:\r\n name: bitwarden\r\nspec:\r\n refreshInterval: 10s\r\n secretStoreRef:\r\n # This name must match the metadata.name in the `SecretStore`\r\n name: bitwarden-secretsmanager\r\n kind: SecretStore\r\n data:\r\n - secretKey: test\r\n remoteRef:\r\n key: \"test2\"\r\n property: \"f5847eef-2f89-43bc-885a-b18a01178e3e\"", + "apiVersion: external-secrets.io/v1beta1\r\nkind: ExternalSecret\r\nmetadata:\r\n name: bitwarden\r\nspec:\r\n refreshInterval: 10s\r\n secretStoreRef:\r\n # This name must match the metadata.name in the `SecretStore`\r\n name: bitwarden-secretsmanager\r\n kind: SecretStore\r\n data:\r\n - secretKey: test\r\n remoteRef:\r\n key: \"test2\"\r\n property: \"f5847eef-2f89-43bc-885a-b18a01178e3e\"", + "apiVersion: v1\r\ndata:\r\n test: c2VjcmV0 # test\r\nimmutable: false\r\nkind: Secret\r\nmetadata:\r\n name: bitwarden\r\n namespace: default\r\ntype: Opaque", + "apiVersion: external-secrets.io/v1alpha1\r\nkind: PushSecret\r\nmetadata:\r\n name: pushsecret-bitwarden # Customisable\r\nspec:\r\n refreshInterval: 10s # Refresh interval for which push secret will reconcile\r\n secretStoreRefs: # A list of secret stores to push secrets to\r\n - name: bitwarden-secretsmanager\r\n kind: SecretStore\r\n selector:\r\n secret:\r\n name: my-secret # Source Kubernetes secret to be pushed\r\n data:\r\n - match:\r\n secretKey: test4 # Source Kubernetes secret key to be pushed\r\n remoteRef:\r\n remoteKey: test4 # Remote reference (where the secret is going to be pushed)\r\n property: f5847eef-2f89-43bc-885a-b18a01178e3e\r\n metadata:\r\n note: \"Note of the secret to add.\"", + "Events: │\r\n│ Type Reason Age From Message │\r\n│ ---- ------ ---- ---- ------- │\r\n│ Normal Created 8m37s external-secrets Created Secret │\r\n│ Warning UpdateFailed 0s (x5 over 3s) external-secrets error retrieving secret at .data[0], key: test2, err: error getting secret: more than one secret found for project f5847eef-2f89-43bc-885a-b18a01178e3e with key test2" + ] + } + }, + 
"metadata": { + "tags": [ + "external-secrets", + "sandbox", + "security", + "troubleshoot" + ], + "cncfProjects": [ + "external-secrets" + ], + "targetResourceKinds": [ + "Secret" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/external-secrets/external-secrets/pull/3603", + "repo": "https://github.com/external-secrets/external-secrets", + "pr": "https://github.com/tropnikovvl/renovate-demo/pull/31" + }, + "reactions": 13, + "comments": 3, + "synthesizedBy": "regex", + "qualityScore": 64 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with external-secrets installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:39:55.065Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/external-secrets/external-secrets-4538-feat-aws-support-for-aws-tags.json b/solutions/cncf-generated/external-secrets/external-secrets-4538-feat-aws-support-for-aws-tags.json new file mode 100644 index 00000000..c9b99092 --- /dev/null +++ b/solutions/cncf-generated/external-secrets/external-secrets-4538-feat-aws-support-for-aws-tags.json @@ -0,0 +1,84 @@ +{ + "version": "kc-mission-v1", + "name": "external-secrets-4538-feat-aws-support-for-aws-tags", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "external-secrets: feat(aws): support for aws tags", + "description": "## Problem Statement\n\nWhat is the problem you're trying to solve?\n\n## Related Issue\n\nPartially resolves #1821 . Provides a capability to create secrets with KSM key, Description and Tags. \n\nIt does not:\n- modify existing keys if/when tags added/removed\n- provide a way to configure capability to set KSM resource policy\n- secret replication in other region\n\n## Proposed Changes\n\nHow do you like to solve the issue and why?\n\nAdded support to create a secret with\n- tags\n- description\n- kms key default", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "## Problem Statement\n\nWhat is the problem you're trying to solve?\n\n## Related Issue\n\nPartially resolves #1821 . Provides a capability to create secrets with KSM key, Description and Tags. \n\nIt does no" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nmetada:\r\n secretPushFormat: string\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/external-secrets/external-secrets/pull/4984. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "## Problem Statement\n\nWhat is the problem you're trying to solve?\n- at the moment, tags only applied to secrets on secret creation.\n- Secret manager to support patch/update/delete tags (aka full lifecycle) https://github.com/external-secrets/external-secrets/issues/1821#issuecomment-2848624074\n- Initially I've added only attach tags on secret creation https://github.com/external-secrets/external-secrets/pull/4538 the plan was to add update/delete right after aws-sdk bump to v2\n- small fix \n\"Screenshot\n\n## Related Issue\n\n## Proposed Changes\n\nAdded support for tags lifecycle\n\n## Checklist\n\nManifest\n\n```yml\n---\napiVersion: v1\nkind: Namespace\nmetadata:\n name: external-secrets\n---\n# https://external-secrets.io/latest/api/secretstore/\napiVersion: external-secrets.io/v1\nkind: SecretStore\nmetadata:\n name: aws-secretstore\n namespace: external-secrets\nspec:\n provider:\n aws:\n service: SecretsManager\n region: eu-west-1\n secretsManager:\n forceDeleteWithoutRecovery: tru", + "codeSnippets": [ + "metada:\r\n secretPushFormat: string", + "metada:\r\n secretPushFormat: string", + "metadata:\r\n apiVersion: kubernetes.external-secrets.io/v1alpha1\r\n kind: PushSecretMetadata\r\n spec:\r\n secretPushFormat: string # When not set, default to binary", + "apiVersion: external-secrets.io/v1beta1\r\nkind: SecretStore\r\nmetadata:\r\n name: secretstore-sample-ik\r\nspec:\r\n provider:\r\n aws:\r\n service: SecretsManager\r\n region: eu-west-1\r\n secretsManager:\r\n forceDeleteWithoutRecovery: true\r\n auth:\r\n secretRef:\r\n accessKeyIDSecretRef:\r\n name: awssm-secret\r\n key: access-key\r\n secretAccessKeySecretRef:\r\n name: awssm-secret\r\n key: secret-access-key\r\n---\r\napiVersion: generators.external-secrets.io/v1alpha1\r\nkind: Password\r\nmetadata:\r\n name: my-password\r\n namespace: external-secrets\r\nspec:\r\n length: 12\r\n digits: 5\r\n symbols: 5\r\n symbolCharacters: \"-_$@\"\r\n noUpper: false\r\n allowRepeat: true\r\n---\r\napiVersion: external-secrets.io/v1alpha1\r\nkind: PushSecret\r\nmetadata:\r\n name: pushsecret-to-aws-example # Customisable\r\n namespace: external-secrets # Same of the SecretStores\r\n labels:\r\n this-is-the-label: \"lol\"\r\n annotations:\r\n this-is-the-annotation: \"haha\"\r\nspec:\r\n deletionPolicy: Delete\r\n refreshInterval: 1m # Refresh interval for which push secret will reconcile\r\n secretStoreRefs: # A list of secret stores to push secrets to\r\n - name: secretstore-sample-ik\r\n kind: SecretStore\r\n selector:\r\n generatorRef:\r\n apiVersion: generators.external-secrets.io/v1alpha1\r\n kind: Password\r\n name: my-password\r\n template:\r\n metadata:\r\n annotations:\r\n a-key2: value1\r\n labels:\r\n l-key2: value1\r\n pp.kubernetes.io/part-of: testing\r\n data:\r\n - conversionStrategy: None\r\n match:\r\n secretKey: password # Source Kubernetes secret key to be pushed\r\n remoteRef:\r\n remoteKey: teamb-my-first-parameter-6 # Remote reference (where the secret is going to be pushed)\r\n metadata:\r\n apiVersion: kubernetes.external-secrets.io/v1alpha1\r\n kind: PushSecretMetadata\r\n spec:\r\n kmsKeyID: bb123123-b2b0-4f60-ac3a-44a13f0e6b6c\r\n secretPushFormat: string\r\n description: \"this is key description\"\r\n tags: # Tags to be added to the secret in Azure Key Vault\r\n secret-store: teamb-secret-store\r\n refresh-interval: 1h", + "❯❯ aws secretsmanager list-secrets\r\n❯❯ aws kms list-aliases\r\n❯❯ aws kms list-keys" + ] + } + }, + "metadata": { + "tags": [ + 
"external-secrets", + "sandbox", + "security", + "troubleshoot" + ], + "cncfProjects": [ + "external-secrets" + ], + "targetResourceKinds": [ + "Service", + "Secret", + "Namespace" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/external-secrets/external-secrets/pull/4538", + "repo": "https://github.com/external-secrets/external-secrets", + "pr": "https://github.com/external-secrets/external-secrets/pull/4984" + }, + "reactions": 4, + "comments": 11, + "synthesizedBy": "regex", + "qualityScore": 62 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with external-secrets installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:40:02.140Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/external-secrets/external-secrets-5470-feat-dynamic-target-implementation-for-external-secrets-so.json b/solutions/cncf-generated/external-secrets/external-secrets-5470-feat-dynamic-target-implementation-for-external-secrets-so.json new file mode 100644 index 00000000..d6ae26dc --- /dev/null +++ b/solutions/cncf-generated/external-secrets/external-secrets-5470-feat-dynamic-target-implementation-for-external-secrets-so.json @@ -0,0 +1,80 @@ +{ + "version": "kc-mission-v1", + "name": "external-secrets-5470-feat-dynamic-target-implementation-for-external-secrets-so", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "external-secrets: feat: dynamic target implementation for external secrets sources ", + "description": "## Problem Statement\n\nThis is an implementation of syncing to a custom resource https://github.com/external-secrets/external-secrets/blob/main/design/012-sync-to-custom-resource.md.\n\nThe following changes have been applied to the external secret controller and the external secret object:\n\n- added manifests to ES to signify what kind of object needs to be tracked\n- overhauled the templating to accommodate unstructured objects instead of just secrets\n- added dynamic watches using informers to all ", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "## Problem Statement\n\nThis is an implementation of syncing to a custom resource https://github.com/external-secrets/external-secrets/blob/main/design/012-sync-to-custom-resource.md.\n\nThe following cha" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nfeat(scope): add new feature\r\nfix(scope): fix bug\r\ndocs(scope): update documentation\r\nchore(scope): update build tool or dependencies\r\nref(scope): refactor code\r\nclean(scope): provider cleanup\r\ntest(scope): add tests\r\nperf(scope): improve performance\r\ndesig(scope): improve design\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Generic manifests reconcile is way heavier than standard `Secret` reconcile (as in, core controller couldnt take the same load with one vs the other. 
I've `pprof` with / without targetting CMs and it's really 20% slower (or more), enough for `workqueue_depth` accumulating with 200~ CMs ( 200~ Secrets works just fine).\n\nI've figured out this happens because we don't have an equivalent of `isSecretValid` method for dynamic targets - we always handle and process them (and emit events, even when there is nothing to be updated) (i.e. there is no data hash to compare to).\n\nThis could be an interesting improvement - that could be tackled further along, just wanted to raise the issue :)", + "codeSnippets": [ + "feat(scope): add new feature\r\nfix(scope): fix bug\r\ndocs(scope): update documentation\r\nchore(scope): update build tool or dependencies\r\nref(scope): refactor code\r\nclean(scope): provider cleanup\r\ntest(scope): add tests\r\nperf(scope): improve performance\r\ndesig(scope): improve design", + "feat(scope): add new feature\r\nfix(scope): fix bug\r\ndocs(scope): update documentation\r\nchore(scope): update build tool or dependencies\r\nref(scope): refactor code\r\nclean(scope): provider cleanup\r\ntest(scope): add tests\r\nperf(scope): improve performance\r\ndesig(scope): improve design", + "{\"level\":\"info\",\"ts\":1761594522.664298,\"logger\":\"controllers.ExternalSecret.informer-manager\",\"msg\":\"registered ExternalSecret with existing informer\",\"gvk\":\"/v1, Kind=ConfigMap\",\"externalSecret\":{\"name\":\"mfa-generator-es\",\"namespace\":\"default\"},\"totalUsers\":2}", + "{\"level\":\"info\",\"ts\":1761594564.7881262,\"logger\":\"controllers.ExternalSecret.informer-manager\",\"msg\":\"registered ExternalSecret with existing informer\",\"gvk\":\"/v1, Kind=ConfigMap\",\"externalSecret\":{\"name\":\"templated-config\",\"namespace\":\"default\"},\"totalUsers\":2}", + "{\"level\":\"info\",\"ts\":1761594588.0394554,\"logger\":\"controllers.ExternalSecret.informer-manager\",\"msg\":\"unregistered ExternalSecret from informer\",\"gvk\":\"/v1, Kind=ConfigMap\",\"externalSecret\":{\"name\":\"mfa-generator-es\",\"namespace\":\"default\"},\"remainingUsers\":1}" + ] + } + }, + "metadata": { + "tags": [ + "external-secrets", + "sandbox", + "security", + "troubleshoot", + "kind-feature", + "kind-documentation", + "size-l" + ], + "cncfProjects": [ + "external-secrets" + ], + "targetResourceKinds": [ + "Secret" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/external-secrets/external-secrets/pull/5470", + "repo": "https://github.com/external-secrets/external-secrets" + }, + "reactions": 5, + "comments": 32, + "synthesizedBy": "regex", + "qualityScore": 62 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with external-secrets installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:40:00.214Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/external-secrets/external-secrets-5930-chore-update-controller-runtime.json b/solutions/cncf-generated/external-secrets/external-secrets-5930-chore-update-controller-runtime.json new file mode 100644 index 00000000..6b464d50 --- /dev/null +++ b/solutions/cncf-generated/external-secrets/external-secrets-5930-chore-update-controller-runtime.json @@ -0,0 +1,84 @@ +{ + "version": "kc-mission-v1", + "name": "external-secrets-5930-chore-update-controller-runtime", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "external-secrets: chore: update controller runtime", + "description": "Upgrades controller-runtime from v0.22.3 → v0.23.1 and aligns many Kubernetes and ecosystem dependencies to the corresponding newer versions (k8s.io/* → v0.35.0, multiple go-openapi, protobuf, prometheus, golang.org/x/*, etc.).", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Upgrades controller-runtime from v0.22.3 → v0.23.1 and aligns many Kubernetes and ecosystem dependencies to the corresponding newer versions (k8s.io/* → v0.35.0, multiple go-openapi, protobuf, prometh" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nfeat(scope): add new feature\r\nfix(scope): fix bug\r\ndocs(scope): update documentation\r\nchore(scope): update build tool or dependencies\r\nref(scope): refactor code\r\nclean(scope): provider cleanup\r\ntest(scope): add tests\r\nperf(scope): improve performance\r\ndesig(scope): improve design\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/external-secrets/external-secrets/pull/5928. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "## Problem Statement\n\nThe external-secrets api module depends on v0.34 of the k8s controller runtime, which can lead to dependency conflicts with other components.\n\n## Related Issue\n\nFixes #...\n\n## Proposed Changes\n\nBump the k8s api dependencies to 0.35.0. 
This includes a change to how webhooks are registered,\nrequiring that the apitype be passed to `ctrl.NewWebhookManagedBy`.\n\nThis change made it awkward to use the GenericStoreValidator, essentially because the\nwebhookBuilder.WithValidator now uses generics, but we can't add generics to GenericStoreValidator makes it incompatible with kubebuilder code-gen.\n\nAs a workaround, I've removed GenericStoreValidator and added explicit types for SecretStoreValidator and ClusterSecretStoreValidator.\n\n## Format\n\nPlease ensure that your PR follows the following format for the title:\n```\nfeat(scope): add new feature\nfix(scope): fix bug\ndocs(scope): update documentation\nchore(scope): update build tool or dependencies\nref(scope): refactor code\nclean(scope): provider cleanup\ntest(scope): add tests\nperf(scope): improve performance\ndesig(scope): improve design\n```\n\nWhere `scope` is _optionally_ one of:\n- charts\n- release\n- testing\n- security\n- templating\n\n## Checklist\n\n- [", + "codeSnippets": [ + "feat(scope): add new feature\r\nfix(scope): fix bug\r\ndocs(scope): update documentation\r\nchore(scope): update build tool or dependencies\r\nref(scope): refactor code\r\nclean(scope): provider cleanup\r\ntest(scope): add tests\r\nperf(scope): improve performance\r\ndesig(scope): improve design", + "feat(scope): add new feature\r\nfix(scope): fix bug\r\ndocs(scope): update documentation\r\nchore(scope): update build tool or dependencies\r\nref(scope): refactor code\r\nclean(scope): provider cleanup\r\ntest(scope): add tests\r\nperf(scope): improve performance\r\ndesig(scope): improve design", + "feat(scope): add new feature\r\nfix(scope): fix bug\r\ndocs(scope): update documentation\r\nchore(scope): update build tool or dependencies\r\nref(scope): refactor code\r\nclean(scope): provider cleanup\r\ntest(scope): add tests\r\nperf(scope): improve performance\r\ndesig(scope): improve design" + ] + } + }, + "metadata": { + "tags": [ + "external-secrets", + "sandbox", + "security", + "troubleshoot", + "kind-documentation", + "kind-dependency", + "kind-chore", + "size-xl" + ], + "cncfProjects": [ + "external-secrets" + ], + "targetResourceKinds": [ + "Secret" + ], + "difficulty": "advanced", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/external-secrets/external-secrets/pull/5930", + "repo": "https://github.com/external-secrets/external-secrets", + "pr": "https://github.com/external-secrets/external-secrets/pull/5928" + }, + "reactions": 3, + "comments": 12, + "synthesizedBy": "regex", + "qualityScore": 65 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with external-secrets installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:40:08.192Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/external-secrets/external-secrets-917-feat-replace-prometheus-annotations-with-servicemonitor.json b/solutions/cncf-generated/external-secrets/external-secrets-917-feat-replace-prometheus-annotations-with-servicemonitor.json new file mode 100644 index 00000000..3bcc2c03 --- /dev/null +++ b/solutions/cncf-generated/external-secrets/external-secrets-917-feat-replace-prometheus-annotations-with-servicemonitor.json @@ -0,0 +1,81 @@ +{ + "version": "kc-mission-v1", + "name": "external-secrets-917-feat-replace-prometheus-annotations-with-servicemonitor", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "external-secrets: feat: replace prometheus annotations with servicemonitor", + "description": "This PR replaces prometheus annotations with ServiceMonitor implementation.\n\nchanges:\n- remove config option to specify metrics port (it's not directly configurable through helmchart, and there should be no need to)\n- add dedicated `$component-servicemonitor.yaml` file that contains service + servicemonitor for each component \n\nScreenshot with all enabled service monitors\n![poc-metrics](https://user-images.githubusercontent.com/1709030/160910429-5bb80fef-404a-404b-bdb3-34c76c1de93c.png)\n\nSuperse", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This PR replaces prometheus annotations with ServiceMonitor implementation.\n\nchanges:\n- remove config option to specify metrics port (it's not directly configurable through helmchart, and there should" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nhelm install prometheus prometheus-community/kube-prometheus-stack\r\nhelm upgrade --install eso --debug --wait --timeout=1m \\\r\n ./deploy/charts/external-secrets \\\r\n --set installCRDs=true --set serviceMonitor.enabled=true \\\r\n --set serviceMonitor.additionalLabels.release=prometheus \\\r\n --set webhook.serviceMonitor.enabled=true \\\r\n --set webhook.serviceMonitor.additionalLabels.release=prometheus \\\r\n --set certController.serviceMonitor.enabled=true \\\r\n --set certController.serviceMoni\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/external-secrets/external-secrets/pull/779. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "See #361", + "codeSnippets": [ + "helm install prometheus prometheus-community/kube-prometheus-stack\r\nhelm upgrade --install eso --debug --wait --timeout=1m \\\r\n ./deploy/charts/external-secrets \\\r\n --set installCRDs=true --set serviceMonitor.enabled=true \\\r\n --set serviceMonitor.additionalLabels.release=prometheus \\\r\n --set webhook.serviceMonitor.enabled=true \\\r\n --set webhook.serviceMonitor.additionalLabels.release=prometheus \\\r\n --set certController.serviceMonitor.enabled=true \\\r\n --set certController.serviceMoni", + "helm install prometheus prometheus-community/kube-prometheus-stack\r\nhelm upgrade --install eso --debug --wait --timeout=1m \\\r\n ./deploy/charts/external-secrets \\\r\n --set installCRDs=true --set serviceMonitor.enabled=true \\\r\n --set serviceMonitor.additionalLabels.release=prometheus \\\r\n --set webhook.serviceMonitor.enabled=true \\\r\n --set webhook.serviceMonitor.additionalLabels.release=prometheus \\\r\n --set certController.serviceMonitor.enabled=true \\\r\n --set certController.serviceMonitor.additionalLabels.release=prometheus\r\n\r\n# then \r\nkubectl port-forward svc/prometheus-kube-prometheus-prometheus 9090:9090\r\n\r\n# visit: http://localhost:9090/targets", + "{{- if .Values.prometheus.enabled }}\r\nThe flag `prometheus.enabled` is deprecated and will be removed in the next release. Please use `servicemonitor.enabled` instead.\r\n{{- end }}", + "[...]\r\nexternal-secrets has been deployed successfully!\r\n\r\nIn order to begin using ExternalSecrets, you will need to set up a SecretStore\r\nor ClusterSecretStore resource (for example, by creating a 'vault' SecretStore).\r\n\r\nMore information on the different types of SecretStores and how to configure them\r\ncan be found in our Github: https://github.com/external-secrets/external-secrets\r\n\r\ndeprecation warning:\r\n> The flag `prometheus.enabled` is deprecated and will be removed in the next release.\r\n Please migrate to using servicemonitor instead." + ] + } + }, + "metadata": { + "tags": [ + "external-secrets", + "sandbox", + "security", + "troubleshoot" + ], + "cncfProjects": [ + "external-secrets" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/external-secrets/external-secrets/pull/917", + "repo": "https://github.com/external-secrets/external-secrets", + "pr": "https://github.com/external-secrets/external-secrets/pull/779" + }, + "reactions": 5, + "comments": 11, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with external-secrets installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:39:58.763Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/flagger/flagger-593-implement-progressive-promotion.json b/solutions/cncf-generated/flagger/flagger-593-implement-progressive-promotion.json new file mode 100644 index 00000000..e661ca3a --- /dev/null +++ b/solutions/cncf-generated/flagger/flagger-593-implement-progressive-promotion.json @@ -0,0 +1,78 @@ +{ + "version": "kc-mission-v1", + "name": "flagger-593-implement-progressive-promotion", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "flagger: Implement progressive promotion", + "description": "This PR adds a new field to the Canary spec `analysis.stepWeightPromotion`. When `stepWeightPromotion` is specified, the promotion phase happens in stages, the traffic is routed back to the primary pods in a progressive manner, the primary weight is increased until it reaches 100%. This way the HPA has time to scale up the primary replicas and scale down the canary ones. \n\nFix: #381\n\nFor testing:\n\n```bash\n# update CRDs\nkubectl apply -f https://raw.githubusercontent.com/weaveworks/flagger/progres", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This PR adds a new field to the Canary spec `analysis.stepWeightPromotion`. When `stepWeightPromotion` is specified, the promotion phase happens in stages, the traffic is routed back to the primary po" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n# update CRDs\r\nkubectl apply -f https://raw.githubusercontent.com/weaveworks/flagger/progressive-promotion/artifacts/flagger/crd.yaml\r\n\r\n# replace Flagger image\r\nkubectl -n istio-system set image deployment/flagger \\\r\nflagger=stefanprodan/flagger:prom-weight.1\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "Thanks @maruina for testing this 👍 \n\nKubernetes events get compacted so the best way to monitor Flagger is by tailing the logs with jq:\n\n```\nkubectl -n istio-system logs deploy/flagger -f | jq .msg\n```", + "codeSnippets": [ + "# update CRDs\r\nkubectl apply -f https://raw.githubusercontent.com/weaveworks/flagger/progressive-promotion/artifacts/flagger/crd.yaml\r\n\r\n# replace Flagger image\r\nkubectl -n istio-system set image deployment/flagger \\\r\nflagger=stefanprodan/flagger:prom-weight.1", + "# update CRDs\r\nkubectl apply -f https://raw.githubusercontent.com/weaveworks/flagger/progressive-promotion/artifacts/flagger/crd.yaml\r\n\r\n# replace Flagger image\r\nkubectl -n istio-system set image deployment/flagger \\\r\nflagger=stefanprodan/flagger:prom-weight.1", + "| [Impacted Files](https://codecov.io/gh/weaveworks/flagger/pull/593?src=pr&el=tree) | Coverage Δ | |\n|---|---|---|\n| [pkg/controller/scheduler.go](https://codecov.io/gh/weaveworks/flagger/pull/593/diff?src=pr&el=tree#diff-cGtnL2NvbnRyb2xsZXIvc2NoZWR1bGVyLmdv) | `44.63% <40.00%> (-0.04%)` | :arrow_down: |\n\n------\n\n[Continue to review full report at Codecov](https://codecov.io/gh/weaveworks/flagger/pull/593?src=pr&el=continue).\n> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)\n> `Δ = absolute (impact)`, `ø = not affected`, `? = missing data`\n> Powered by [Codecov](https://codecov.io/gh/weaveworks/flagger/pull/593?src=pr&el=footer). Last update [0056b99...be96a11](https://codecov.io/gh/weaveworks/flagger/pull/593?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).\n\nHey @stefanprodan, I was able to test it and it works really well. Thank you!\r\n\r\nThis is the Canary spec", + "those are the events", + "and this is what I see in my graphs\r\n\r\n\"image\"\r\n\r\n(note that I used a small stepWeightPromotion just to make it very evident on the graphs)\nThanks @maruina for testing this 👍 \r\n\r\nKubernetes events get compacted so the best way to monitor Flagger is by tailing the logs with jq:" + ] + } + }, + "metadata": { + "tags": [ + "flagger", + "graduated", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "flagger" + ], + "targetResourceKinds": [ + "Pod", + "Deployment" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/fluxcd/flagger/pull/593", + "repo": "https://github.com/fluxcd/flagger" + }, + "reactions": 10, + "comments": 4, + "synthesizedBy": "regex", + "qualityScore": 60 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with flagger installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:25:22.402Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/flagger/flagger-670-skipper-ingress-controller-support.json b/solutions/cncf-generated/flagger/flagger-670-skipper-ingress-controller-support.json new file mode 100644 index 00000000..b1167dc2 --- /dev/null +++ b/solutions/cncf-generated/flagger/flagger-670-skipper-ingress-controller-support.json @@ -0,0 +1,84 @@ +{ + "version": "kc-mission-v1", + "name": "flagger-670-skipper-ingress-controller-support", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "flagger: Skipper Ingress Controller support", + "description": "fix: #452\n\n## ✨ zalan.do/Skipper Router Implementation\n\nAn HTTP router and reverse proxy for service composition, including use cases like Kubernetes Ingress\nhttps://github.com/zalando/skipper/\n\n* The concept is to define routes with specific weights via the skipper specific annotation predicate of `zalando.org/backend-weights`.\n* apex Ingress is immutable\n* A new \"canary ingress\" is created with to paths for primary and canary service that has higher \"weight\" hence receiving all traffic, which", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "fix: #452\n\n## ✨ zalan.do/Skipper Router Implementation\n\nAn HTTP router and reverse proxy for service composition, including use cases like Kubernetes Ingress\nhttps://github.com/zalando/skipper/\n\n* The" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n#!/usr/bin/env bash\r\nREPO_ROOT=$(git rev-parse --show-toplevel)\r\ncd $REPO_ROOT\r\n\r\nmake test\r\nmake build\r\ndocker tag weaveworks/flagger:latest test/flagger:latest\r\nmake loadtester-build\r\n(kind get clusters && kubectl delete ns/test --force) || kind create cluster --wait 5m --image kindest/node:v1.16.9\r\n./test/e2e-skipper.sh\r\n# port forward prometheus UI to localhost:9090\r\nkubectl port-forward $(kubectl get pods -l=app=flagger-prometheus -o name -n flagger-system | head -n 1) 9090:9090 -n flagger-\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/o11n/flagger/pull/15. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "📝 add documentation about Skipper Ingress\n\nSkipper Ingress Controller support is added with\nhttps://github.com/weaveworks/flagger/pull/670.\n\nThis commit add the documentation and links to mention\nSkipper is now an available option.\n\nCurrently only Canary deployments are supported.", + "codeSnippets": [ + "#!/usr/bin/env bash\r\nREPO_ROOT=$(git rev-parse --show-toplevel)\r\ncd $REPO_ROOT\r\n\r\nmake test\r\nmake build\r\ndocker tag weaveworks/flagger:latest test/flagger:latest\r\nmake loadtester-build\r\n(kind get clusters && kubectl delete ns/test --force) || kind create cluster --wait 5m --image kindest/node:v1.16.9\r\n./test/e2e-skipper.sh\r\n# port forward prometheus UI to localhost:9090\r\nkubectl port-forward $(kubectl get pods -l=app=flagger-prometheus -o name -n flagger-system | head -n 1) 9090:9090 -n flagger-", + "#!/usr/bin/env bash\r\nREPO_ROOT=$(git rev-parse --show-toplevel)\r\ncd $REPO_ROOT\r\n\r\nmake test\r\nmake build\r\ndocker tag weaveworks/flagger:latest test/flagger:latest\r\nmake loadtester-build\r\n(kind get clusters && kubectl delete ns/test --force) || kind create cluster --wait 5m --image kindest/node:v1.16.9\r\n./test/e2e-skipper.sh\r\n# port forward prometheus UI to localhost:9090\r\nkubectl port-forward $(kubectl get pods -l=app=flagger-prometheus -o name -n flagger-system | head -n 1) 9090:9090 -n flagger-system &\r\n\r\n./test/e2e-skipper-tests.sh", + "{\"level\":\"info\",\"ts\":\"2020-08-14T10:07:36.357Z\",\"caller\":\"controller/events.go:28\",\"msg\":\"Halt podinfo.test advancement request duration 915ms > 500ms\",\"canary\":\"podinfo.test\"} \r\n{\"level\":\"debug\",\"ts\":\"2020-08-14T10:07:36.358Z\",\"logger\":\"event-broadcaster\",\"caller\":\"record/event.go:278\",\"msg\":\"Event(v1.ObjectReference{Kind:\\\"Canary\\\", Namespace:\\\"test\\\", Name:\\\"podinfo\\\", UID:\\\"c1162c0e-409a-4825-9dac-a0a6c3418e72\\\", APIVersion:\\\"flagger.app/v1beta1\\\", ResourceVersion:\\\"3704\\\", FieldPath:\\\"\\\"}): type: 'Warning' reason: 'Synced' Halt podinfo.test advancement request duration 915ms > 500ms\"} \r\n{\"level\":\"debug\",\"ts\":\"2020-08-14T10:07:51.341Z\",\"caller\":\"router/skipper.go:155\",\"msg\":\"GetRoutes primaryWeight: 60, canaryWeight: 40\",\"GetRoutes\":\"podinfo.test\"} \r\n{\"level\":\"info\",\"ts\":\"2020-08-14T10:07:51.348Z\",\"caller\":\"controller/events.go:28\",\"msg\":\"Rolling back podinfo.test failed checks threshold reached 5\",\"canary\":\"podinfo.test\"}" + ] + } + }, + "metadata": { + "tags": [ + "flagger", + "graduated", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "flagger" + ], + "targetResourceKinds": [ + "Pod", + "Service", + "Ingress", + "Namespace", + "Node" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/fluxcd/flagger/pull/670", + "repo": "https://github.com/fluxcd/flagger", + "pr": "https://github.com/o11n/flagger/pull/15" + }, + "reactions": 7, + "comments": 4, + "synthesizedBy": "regex", + "qualityScore": 66 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with flagger installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:25:25.138Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/fluent-bit/fluent-bit-1166-gelf-replace-with-in-keys-when-converting-from-msgpack.json b/solutions/cncf-generated/fluent-bit/fluent-bit-1166-gelf-replace-with-in-keys-when-converting-from-msgpack.json new file mode 100644 index 00000000..f9c42fb4 --- /dev/null +++ b/solutions/cncf-generated/fluent-bit/fluent-bit-1166-gelf-replace-with-in-keys-when-converting-from-msgpack.json @@ -0,0 +1,75 @@ +{ + "version": "kc-mission-v1", + "name": "fluent-bit-1166-gelf-replace-with-in-keys-when-converting-from-msgpack", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "fluent-bit: [gelf] Replace '/' with '_' in keys when converting from msgpack.", + "description": "Allows logs form Kubernetes pods having labels or annotations containing '/' in the name, which is not a valid GELF field name.\n\nAlso changes the log level to `error` and adds the invalid char position to \"invalid key char\" errors.", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Allows logs form Kubernetes pods having labels or annotations containing '/' in the name, which is not a valid GELF field name.\n\nAlso changes the log level to `error` and adds the invalid char positio" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nvoid * a = NULL;\r\n void * b = NULL;\r\n void * c = NULL;\r\n void * result = NULL;\r\n\r\n a = malloc(len+1);\r\n ...\r\n {\r\n if (!b) goto cleanup; /* Uh oh! Exit gracefully! */\r\n ...\r\n }\r\n /* Normal path of execution gets here */\r\n result = ... /* Not a, b or c! */\r\n\r\n /* Do the necessary cleanup, whether or not there was an error */\r\n\r\ncleanup:\r\n free(a);\r\n free(b);\r\n free(c);\r\n return result;\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "In C++ this tends to be less of an issue, of course.\n\nThe sort of pattern I suggest is something like:\n\n```\n void * a = NULL;\n void * b = NULL;\n void * c = NULL;\n void * result = NULL;\n\n a = malloc(len+1);\n ...\n {\n if (!b) goto cleanup; /* Uh oh! Exit gracefully! */\n ...\n }\n /* Normal path of execution gets here */\n result = ... /* Not a, b or c! */\n\n /* Do the necessary cleanup, whether or not there was an error */\n\ncleanup:\n free(a);\n free(b);\n free(c);\n return result;\n```\n\nIt's one of the few remaining legitimate uses of `goto` but a good one, in my opinion.\nAssuming `free(NULL)` is harmless.", + "codeSnippets": [ + "void * a = NULL;\r\n void * b = NULL;\r\n void * c = NULL;\r\n void * result = NULL;\r\n\r\n a = malloc(len+1);\r\n ...\r\n {\r\n if (!b) goto cleanup; /* Uh oh! Exit gracefully! */\r\n ...\r\n }\r\n /* Normal path of execution gets here */\r\n result = ... /* Not a, b or c! */\r\n\r\n /* Do the necessary cleanup, whether or not there was an error */\r\n\r\ncleanup:\r\n free(a);\r\n free(b);\r\n free(c);\r\n return result;", + "void * a = NULL;\r\n void * b = NULL;\r\n void * c = NULL;\r\n void * result = NULL;\r\n\r\n a = malloc(len+1);\r\n ...\r\n {\r\n if (!b) goto cleanup; /* Uh oh! Exit gracefully! 
*/\r\n ...\r\n }\r\n /* Normal path of execution gets here */\r\n result = ... /* Not a, b or c! */\r\n\r\n /* Do the necessary cleanup, whether or not there was an error */\r\n\r\ncleanup:\r\n free(a);\r\n free(b);\r\n free(c);\r\n return result;", + ", just in case.\nFor the sake of portability, I'd recommend the NULL check. I do see NULL checks elsewhere in the codebase:" + ] + } + }, + "metadata": { + "tags": [ + "fluent-bit", + "graduated", + "observability", + "troubleshoot" + ], + "cncfProjects": [ + "fluent-bit" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/fluent/fluent-bit/pull/1166", + "repo": "https://github.com/fluent/fluent-bit" + }, + "reactions": 3, + "comments": 15, + "synthesizedBy": "regex", + "qualityScore": 62 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with fluent-bit installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:28:14.172Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/fluent-bit/fluent-bit-2043-in-tail-support-multiline-functionality-not-filter-in-dockermode.json b/solutions/cncf-generated/fluent-bit/fluent-bit-2043-in-tail-support-multiline-functionality-not-filter-in-dockermode.json new file mode 100644 index 00000000..f698405a --- /dev/null +++ b/solutions/cncf-generated/fluent-bit/fluent-bit-2043-in-tail-support-multiline-functionality-not-filter-in-dockermode.json @@ -0,0 +1,76 @@ +{ + "version": "kc-mission-v1", + "name": "fluent-bit-2043-in-tail-support-multiline-functionality-not-filter-in-dockermode", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "fluent-bit: in_tail: Support multiline functionality (not filter) in dockermode", + "description": "Use Parser to verify if line is valid first line while using dockermode.\n\nFor example input:\n```\n{\"log\":\"2020-03-24 Test line of first log\\n\"}\n{\"log\":\" Another line of first log\\n\"}\n{\"log\":\"2020-03-24 Test line of second log\\n\"}\n\n```\n\nAnd regex: `Regex (?^{\"log\":\"\\d{4}-\\d{2}-\\d{2}.*)`\n\nWill output:\n```\n[0] containers.var.log.containers.test.log: [1585071971.732015500,\n {\"log\"=>\"{\"log\":\"2020-03-24 Test line of first log\n \\n Another line of first log\\n\"}\"}]\n[1] conta", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Use Parser to verify if line is valid first line while using dockermode.\n\nFor example input:\n```\n{\"log\":\"2020-03-24 Test line of first log\\n\"}\n{\"log\":\" Another line of first log\\n\"}\n{\"log\":\"" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n{\"log\":\"2020-03-24 Test line of first log\\n\"}\r\n{\"log\":\" Another line of first log\\n\"}\r\n{\"log\":\"2020-03-24 Test line of second log\\n\"}\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "Valgrind\n\n```\nroot@b155d80f0a41:/tmp/src/build# rm /tail-db/tail-containers-state.db; valgrind --leak-check=full /fluent-bit/bin/fluent-bit -c /fluent-bit/etc/fluent-bit.conf\nrm: cannot remove '/tail-db/tail-containers-state.db': No such file or directory\n==119== Memcheck, a memory error detector\n==119== Copyright (C) 2002-2017, and GNU GPL'd, by Julian Seward et al.\n==119== Using Valgrind-3.14.0 and LibVEX; rerun with -h for copyright info\n==119== Command: /fluent-bit/bin/fluent-bit -c /fluent-bit/etc/fluent-bit.conf\n==119== \nFluent Bit v1.4.0\n* Copyright (C) 2019-2020 The Fluent Bit Authors\n* Copyright (C) 2015-2018 Treasure Data\n* Fluent Bit is a CNCF sub-project under the umbrella of Fluentd\n* https://fluentbit.io\n\n[2020/03/26 13:09:14] [ info] Configuration:\n[2020/03/26 13:09:14] [ info] flush time | 5.000000 seconds\n[2020/03/26 13:09:14] [ info] grace | 5 seconds\n[2020/03/26 13:09:14] [ info] daemon | 0\n[2020/03/26 13:09:14] [ info] ___________\n[2020/03/26 13:09:14] [ info] inputs:\n[2020/03/26 13:09:14] [ info] tail\n[2020/03/26 13:09:14] [ info] ___________\n[2020/03/26 13:09:14] [ info] filters:\n[2020/03/26 13:09:14] [ info] ___________\n[2020/03/26 13:09:14] [ info] outputs:\n[2020/03/26 13:09:14] [ info] stdout.0\n[2020/03/26 13:09:14] [ info] ___________\n[2020/03/26 13:09:14] [ info] collectors:\n[2020/03/26 13:09:14] [debug] [storage] [cio stream] new stream registered: tail.0\n[2020/03/26 13:09:14] [ info] [storage] version=1.0.3, i", + "codeSnippets": [ + "{\"log\":\"2020-03-24 Test line of first log\\n\"}\r\n{\"log\":\" Another line of first log\\n\"}\r\n{\"log\":\"2020-03-24 Test line of second log\\n\"}", + "{\"log\":\"2020-03-24 Test line of first log\\n\"}\r\n{\"log\":\" Another line of first log\\n\"}\r\n{\"log\":\"2020-03-24 Test line of second log\\n\"}", + "[0] containers.var.log.containers.test.log: [1585071971.732015500,\r\n {\"log\"=>\"{\"log\":\"2020-03-24 Test line of first log\r\n \\n Another line of first log\\n\"}\"}]\r\n[1] containers.var.log.containers.test.log: [1585071975.000917200,\r\n {\"log\"=>\"{\"log\":\"2020-03-24 Test line of second log\\n\"}\"}]", + "[0] containers.var.log.containers.test.log: [1585071971.732015500,\r\n {\"log\"=>\"2020-03-24 Test line of first log\r\n \\n Another line of first log\\n\"}]\r\n[1] containers.var.log.containers.test.log: [1585071975.000917200,\r\n {\"log\"=>\"2020-03-24 Test line of second log\\n\"}]", + "[0] containers.var.log.containers.test.log: [1585071971.732015500,\r\n {\"log\"=>\"{\"log\":\"2020-03-24 Test line of first log\r\n \\n Another line of first log\\n\"}\"}]\r\n[1] containers.var.log.containers.test.log: [1585071975.000917200,\r\n {\"log\"=>\"{\"log\":\"2020-03-24 Test line of second log\\n\"}\"}]" + ] + } + }, + "metadata": { + "tags": [ + "fluent-bit", + "graduated", + "observability", + "troubleshoot", + "waiting-for-user" + ], + "cncfProjects": [ + "fluent-bit" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/fluent/fluent-bit/pull/2043", + "repo": "https://github.com/fluent/fluent-bit" + }, + "reactions": 15, + "comments": 40, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with fluent-bit installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:27:59.616Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/fluent-bit/fluent-bit-2783-retrieve-kubernetes-pod-ip-address-if-it-is-set-in-status-podip.json b/solutions/cncf-generated/fluent-bit/fluent-bit-2783-retrieve-kubernetes-pod-ip-address-if-it-is-set-in-status-podip.json new file mode 100644 index 00000000..70255748 --- /dev/null +++ b/solutions/cncf-generated/fluent-bit/fluent-bit-2783-retrieve-kubernetes-pod-ip-address-if-it-is-set-in-status-podip.json @@ -0,0 +1,75 @@ +{ + "version": "kc-mission-v1", + "name": "fluent-bit-2783-retrieve-kubernetes-pod-ip-address-if-it-is-set-in-status-podip", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "fluent-bit: Retrieve Kubernetes pod IP address if it is set in status.podIP", + "description": "This PR exposes the Kubernetes pod IP address if it has been set.\n\nFixes: https://github.com/fluent/fluent-bit/issues/2301\n\n----\nEnter `[N/A]` in the box, if an item is not applicable to your change.\n\n**Testing**\nBefore we can approve your change; please submit the following in a comment:\n- [ N/A ] Example configuration file for the change\n- [ N/A ] Debug log output from testing the change\n\n- [ N/A ] Attached [Valgrind](https://valgrind.org/docs/manual/quick-start.html) output that shows no leak", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This PR exposes the Kubernetes pod IP address if it has been set.\n\nFixes: https://github.com/fluent/fluent-bit/issues/2301\n\n----\nEnter `[N/A]` in the box, if an item is not applicable to your change.\n" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nfilter_kubernetes.c:156: Check comparing expected record with actual record... failed\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Hi to all who've commented 👋 \n\nIf there is anybody who would like to take over, feel free to. I've since moved on to other engineering domains so am no longer active in this space. \n\nAfter this PR had no interest from the maintainers within the first two years of it being opened I gave up any hope of it ever being merged. \n\nI still believe the intent behind this PR holds merit that many would benefit from and hope that the maintainers see from the continued activity within this PR that people are still looking for a solution.", + "codeSnippets": [ + "filter_kubernetes.c:156: Check comparing expected record with actual record... failed", + "filter_kubernetes.c:156: Check comparing expected record with actual record... 
failed" + ] + } + }, + "metadata": { + "tags": [ + "fluent-bit", + "graduated", + "observability", + "troubleshoot", + "docs-required" + ], + "cncfProjects": [ + "fluent-bit" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/fluent/fluent-bit/pull/2783", + "repo": "https://github.com/fluent/fluent-bit" + }, + "reactions": 10, + "comments": 24, + "synthesizedBy": "regex", + "qualityScore": 65 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with fluent-bit installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:28:00.766Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/fluent-bit/fluent-bit-4197-in-tail-create-stream-id-by-file-inode-4190.json b/solutions/cncf-generated/fluent-bit/fluent-bit-4197-in-tail-create-stream-id-by-file-inode-4190.json new file mode 100644 index 00000000..6e305c05 --- /dev/null +++ b/solutions/cncf-generated/fluent-bit/fluent-bit-4197-in-tail-create-stream-id-by-file-inode-4190.json @@ -0,0 +1,84 @@ +{ + "version": "kc-mission-v1", + "name": "fluent-bit-4197-in-tail-create-stream-id-by-file-inode-4190", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "fluent-bit: in_tail: create stream_id by file inode(#4190)", + "description": "Currently, stream_id of multiline is created by filename.\nIt means stream_id of rotated file will be same id since the filename is same.\n\nThis patch is to create stream_id from inode string.\nIt will create unique stream_id even if the file is rotated.\n\n----\nEnter `[N/A]` in the box, if an item is not applicable to your change.\n\n**Testing**\nBefore we can approve your change; please submit the following in a comment:\n- [X] Example configuration file for the change\n- [X] Debug log output from testi", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Currently, stream_id of multiline is created by filename.\nIt means stream_id of rotated file will be same id since the filename is same.\n\nThis patch is to create stream_id from inode string.\nIt will c" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n[SERVICE]\r\n Flush 1\r\n Daemon Off\r\n Log_Level info\r\n HTTP_Monitor Off\r\n HTTP_Port 2020\r\n storage.path /tmp/fluent-bit-data/\r\n storage.sync normal\r\n storage.checksum off\r\n storage.max_chunks_up 128\r\n storage.backlog.mem_limit 512M\r\n storage.metrics on\r\n\r\n[INPUT]\r\n Name tail\r\n Path /tmp/logs/*.log\r\n multiline.parser cri\r\n Tag kube.*\r\n Refresh_I\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kube-logging/logging-operator/pull/884. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "| Q | A\n| --------------- | ---\n| Bug fix? | no\n| New feature? | yes\n| API breaks? | no\n| Deprecations? 
| no\n| Related tickets | fixes #868\n| License | Apache 2.0\n\n### What's in this PR?\nSupport for using `multiline.parser` in fluentbit.\n\n### Why?\nCurrently some container runtimes like containerd & cri can write longer log messages separated into multiple lines in the log file. A special flag is used to indicate if the message is partial (additional lines will follow) or full. You can handle this by using `Parser_Firstline` & `Parser_N` fields but with new multiline core support introduced in fluentbit 1.8 this can be achieved by only setting `multiline.parser` to the appropriate value and fluentbit handles this automatically. Multiple parser can be added which allow different node types to be scraped by the same fluentbit configuration. See more on https://docs.fluentbit.io/manual/pipeline/inputs/tail#multiline-support.\n\n### Additional context\n`fluentbit v1.8.9` is needed because of https://github.com/fluent/fluent-bit/pull/4197.\n\nThis was tested on one of our clusters using `logging-operator v3.15.0` and manually overriding fluentbit config using `customConfigSecret`. Only `multiline.parser` line was added to the tail INPUT. The only side-effect we noticed was that the message field changes from `message` to `log` after parsing.\n\nThe change seems straight-forward and simple but can be", + "codeSnippets": [ + "[SERVICE]\r\n Flush 1\r\n Daemon Off\r\n Log_Level info\r\n HTTP_Monitor Off\r\n HTTP_Port 2020\r\n storage.path /tmp/fluent-bit-data/\r\n storage.sync normal\r\n storage.checksum off\r\n storage.max_chunks_up 128\r\n storage.backlog.mem_limit 512M\r\n storage.metrics on\r\n\r\n[INPUT]\r\n Name tail\r\n Path /tmp/logs/*.log\r\n multiline.parser cri\r\n Tag kube.*\r\n Refresh_I", + "[SERVICE]\r\n Flush 1\r\n Daemon Off\r\n Log_Level info\r\n HTTP_Monitor Off\r\n HTTP_Port 2020\r\n storage.path /tmp/fluent-bit-data/\r\n storage.sync normal\r\n storage.checksum off\r\n storage.max_chunks_up 128\r\n storage.backlog.mem_limit 512M\r\n storage.metrics on\r\n\r\n[INPUT]\r\n Name tail\r\n Path /tmp/logs/*.log\r\n multiline.parser cri\r\n Tag kube.*\r\n Refresh_Interval 1\r\n Mem_Buf_Limit 50MB\r\n Buffer_Chunk_Size 1MB\r\n Buffer_Max_Size 2MB\r\n Skip_Empty_Lines On\r\n Skip_Long_Lines On\r\n Rotate_Wait 5\r\n DB /tmp/tail-containers-state.db\r\n DB.Sync Normal\r\n storage.type filesystem\r\n\r\n[OUTPUT]\r\n Name stdout\r\n Match *", + "/tmp/logs/*.log {\r\n\trotate 5\r\n\tmonthly\r\n\trotate 12\r\n\tcompress\r\n\tdelaycompress\r\n\tmissingok\r\n\tnotifempty\r\n\tcreate 644 root root\r\n}", + "$ ../bin/fluent-bit -c a.conf \r\nFluent Bit v1.9.0\r\n* Copyright (C) 2019-2021 The Fluent Bit Authors\r\n* Copyright (C) 2015-2018 Treasure Data\r\n* Fluent Bit is a CNCF sub-project under the umbrella of Fluentd\r\n* https://fluentbit.io\r\n\r\n[2021/10/17 12:25:58] [ info] [engine] started (pid=26733)\r\n[2021/10/17 12:25:58] [ info] [storage] version=1.1.4, initializing...\r\n[2021/10/17 12:25:58] [ info] [storage] root path '/tmp/fluent-bit-data/'\r\n[2021/10/17 12:25:58] [ info] [storage] normal synchronization mode, checksum disabled, max_chunks_up=128\r\n[2021/10/17 12:25:58] [ info] [storage] backlog input plugin: storage_backlog.1\r\n[2021/10/17 12:25:58] [ info] [cmetrics] version=0.2.2\r\n[2021/10/17 12:25:58] [ info] [input:tail:tail.0] multiline core started\r\n[2021/10/17 12:25:58] [ info] [input:storage_backlog:storage_backlog.1] queue memory limit: 488.3M\r\n[2021/10/17 12:25:58] [ info] [sp] stream processor started\r\n[2021/10/17 12:25:58] [ info] [input:tail:tail.0] 
inotify_fs_add(): inode=1703973 watch_fd=1 name=/tmp/logs/x.log\r\n[0] kube.tmp.logs.x.log: [1634441162.278157692, {\"time\"=>\"2021-10-17T03:26:02.278157692Z\", \"stream\"=>\"stdout\", \"_p\"=>\"F\", \"log\"=>\"testing...\"}]\r\n[2021/10/17 12:26:05] [ info] [input:tail:tail.0] inode=1703973 handle rotation(): /tmp/logs/x.log => /tmp/logs/x.log.1\r\n[2021/10/17 12:26:05] [ info] [input:tail:tail.0] inotify_fs_remove(): inode=1703973 watch_fd=1\r\n[2021/10/17 12:26:05] [ info] [input:tail:tail.0] inotify_fs_add(): inode=1703973 watch_fd=2 name=/tmp/logs/x.log.1\r\n[2021/10/17 12:26:05] [ info] [input:tail:tail.0] inotify_fs_add(): inode=1703983 watch_fd=3 name=/tmp/logs/x.log\r\n[0] kube.tmp.logs.x.log: [1634441170.868503898, {\"time\"=>\"2021-10-17T03:26:10.868503898Z\", \"stream\"=>\"stdout\", \"_p\"=>\"F\", \"log\"=>\"testing...\"}]\r\n[0] kube.tmp.logs.x.log: [1634441172.198605785, {\"time\"=>\"2021-10-17T03:26:12.198605785Z\", \"stream\"=>\"stdout\", \"_p\"=>\"F\", \"log\"=>\"testing...\"}]\r\n[2021/10/17 12:26:13] [ info] [input:tail:tail.0] inotify_fs_remove(): inode=1703973 watch_fd=2\r\n^C[2021/10/17 12:26:13] [engine] caught signal (SIGINT)\r\n[2021/10/17 12:26:13] [ info] [input] pausing tail.0\r\n[2021/10/17 12:26:13] [ info] [input] pausing storage_backlog.1\r\n[2021/10/17 12:26:13] [ warn] [engine] service will stop in 5 seconds\r\n[2021/10/17 12:26:18] [ info] [engine] service stopped\r\n[2021/10/17 12:26:18] [ info] [input:tail:tail.0] inotify_fs_remove(): inode=1703983 watch_fd=3", + "$ valgrind --leak-check=full ../bin/fluent-bit -c a.conf \r\n==26743== Memcheck, a memory error detector\r\n==26743== Copyright (C) 2002-2017, and GNU GPL'd, by Julian Seward et al.\r\n==26743== Using Valgrind-3.15.0 and LibVEX; rerun with -h for copyright info\r\n==26743== Command: ../bin/fluent-bit -c a.conf\r\n==26743== \r\nFluent Bit v1.9.0\r\n* Copyright (C) 2019-2021 The Fluent Bit Authors\r\n* Copyright (C) 2015-2018 Treasure Data\r\n* Fluent Bit is a CNCF sub-project under the umbrella of Fluentd\r\n* https://fluentbit.io\r\n\r\n[2021/10/17 12:27:31] [ info] [engine] started (pid=26743)\r\n[2021/10/17 12:27:31] [ info] [storage] version=1.1.4, initializing...\r\n[2021/10/17 12:27:31] [ info] [storage] root path '/tmp/fluent-bit-data/'\r\n[2021/10/17 12:27:31] [ info] [storage] normal synchronization mode, checksum disabled, max_chunks_up=128\r\n[2021/10/17 12:27:31] [ info] [storage] backlog input plugin: storage_backlog.1\r\n[2021/10/17 12:27:31] [ info] [cmetrics] version=0.2.2\r\n[2021/10/17 12:27:32] [ info] [input:tail:tail.0] multiline core started\r\n[2021/10/17 12:27:32] [ info] [input:storage_backlog:storage_backlog.1] queue memory limit: 488.3M\r\n[2021/10/17 12:27:32] [ info] [sp] stream processor started\r\n[2021/10/17 12:27:32] [ info] [input:tail:tail.0] inotify_fs_add(): inode=1703983 watch_fd=1 name=/tmp/logs/x.log\r\n==26743== Warning: client switching stacks? SP change: 0x57e59c8 --> 0x4db4ac0\r\n==26743== to suppress, use: --max-stackframe=10686216 or greater\r\n==26743== Warning: client switching stacks? SP change: 0x4db4a38 --> 0x57e59c8\r\n==26743== to suppress, use: --max-stackframe=10686352 or greater\r\n==26743== Warning: client switching stacks? 
SP change: 0x57e59c8 --> 0x4db4a38\r\n==26743== to suppress, use: --max-stackframe=10686352 or greater\r\n==26743== further instances of this message will not be shown.\r\n[0] kube.tmp.logs.x.log: [1634441254.498786373, {\"time\"=>\"2021-10-17T03:27:34.498786373Z\", \"stream\"=>\"stdout\", \"_p\"=>\"F\", \"log\"=>\"testing...\"}]\r\n[2021/10/17 12:27:38] [ info] [input:tail:tail.0] inode=1703983 handle rotation(): /tmp/logs/x.log => /tmp/logs/x.log.1\r\n[2021/10/17 12:27:38] [ info] [input:tail:tail.0] inotify_fs_remove(): inode=1703983 watch_fd=1\r\n[2021/10/17 12:27:38] [ info] [input:tail:tail.0] inotify_fs_add(): inode=1703983 watch_fd=2 name=/tmp/logs/x.log.1\r\n[2021/10/17 12:27:38] [ info] [input:tail:tail.0] inotify_fs_add(): inode=1703973 watch_fd=3 name=/tmp/logs/x.log\r\n[0] kube.tmp.logs.x.log: [1634441262.603936634, {\"time\"=>\"2021-10-17T03:27:42.603936634Z\", \"stream\"=>\"stdout\", \"_p\"=>\"F\", \"log\"=>\"testing...\"}]\r\n[0] kube.tmp.logs.x.log: [1634441264.109924829, {\"time\"=>\"2021-10-17T03:27:44.109924829Z\", \"stream\"=>\"stdout\", \"_p\"=>\"F\", \"log\"=>\"testing...\"}]\r\n^C[2021/10/17 12:27:46] [2021/10/17 12:27:46] [ info] [input:tail:tail.0] inotify_fs_remove(): inode=1703983 watch_fd=2\r\n[engine] caught signal (SIGINT)\r\n[2021/10/17 12:27:46] [ info] [input] pausing tail.0\r\n[2021/10/17 12:27:46] [ info] [input] pausing storage_backlog.1\r\n[2021/10/17 12:27:46] [ warn] [engine] service will stop in 5 seconds\r\n[2021/10/17 12:27:51] [ info] [engine] service stopped\r\n[2021/10/17 12:27:51] [ info] [input:tail:tail.0] inotify_fs_remove(): inode=1703973 watch_fd=3\r\n==26743== \r\n==26743== HEAP SUMMARY:\r\n==26743== in use at exit: 0 bytes in 0 blocks\r\n==26743== total heap usage: 1,782 allocs, 1,782 frees, 4,030,797 bytes allocated\r\n==26743== \r\n==26743== All heap blocks were freed -- no leaks are possible\r\n==26743== \r\n==26743== For lists of detected and suppressed errors, rerun with: -s\r\n==26743== ERROR SUMMARY: 0 errors from 0 contexts (suppressed: 0 from 0)" + ] + } + }, + "metadata": { + "tags": [ + "fluent-bit", + "graduated", + "observability", + "troubleshoot", + "backport-to-v1-8-x" + ], + "cncfProjects": [ + "fluent-bit" + ], + "targetResourceKinds": [ + "Service", + "Node" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/fluent/fluent-bit/pull/4197", + "repo": "https://github.com/fluent/fluent-bit", + "pr": "https://github.com/kube-logging/logging-operator/pull/884" + }, + "reactions": 4, + "comments": 2, + "synthesizedBy": "regex", + "qualityScore": 64 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with fluent-bit installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:28:05.842Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/fluent-bit/fluent-bit-5590-mp-sort-flb-record-accessor-by-a-number-of-subkeys-5546.json b/solutions/cncf-generated/fluent-bit/fluent-bit-5590-mp-sort-flb-record-accessor-by-a-number-of-subkeys-5546.json new file mode 100644 index 00000000..f1dc6f32 --- /dev/null +++ b/solutions/cncf-generated/fluent-bit/fluent-bit-5590-mp-sort-flb-record-accessor-by-a-number-of-subkeys-5546.json @@ -0,0 +1,103 @@ +{ + "version": "kc-mission-v1", + "name": "fluent-bit-5590-mp-sort-flb-record-accessor-by-a-number-of-subkeys-5546", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "fluent-bit: mp: sort flb_record_accessor by a number of subkeys(#5546)", + "description": "out_loki tries to remove key/value using flb_mp_accessor_keys_remove.\nThe API needs a key lists to indicate which key should be removed.\n\nI tested the API and I found that the list should be sorted if it is passed nested key/value.\nHowever the lists is not sorted at current master. It may not work if user passes nested key/value.\n\nThis patch is to fix it.\n- Add test code\n- Add API to get subkey number\n- Insert record_accessor sorted by subkey number\n\n----\nEnter `[N/A]` in the box, if an item is ", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "out_loki tries to remove key/value using flb_mp_accessor_keys_remove.\nThe API needs a key lists to indicate which key should be removed.\n\nI tested the API and I found that the list should be sorted if" + }, + { + "title": "head -> val1", + "description": "head -> val1" + }, + { + "title": "head -> val2 -> val1", + "description": "head -> val2 -> val1" + }, + { + "title": "mk_list_init(&head)", + "description": "mk_list_init(&head)" + }, + { + "title": "mk_list_add(&val, &head)", + "description": "mk_list_add(&val, &head)" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n[INPUT]\r\n Name dummy\r\n dummy {\"log\":\"blahblah\",\"kubernetes\":{\"pod_name\":\"nginx\", \"namespace_name\":\"default\"}}\r\n\r\n[OUTPUT]\r\n Name loki\r\n auto_kubernetes_labels off\r\n labels pod=$kubernetes['pod_name']\r\n remove_keys kubernetes\r\n port 9200\r\n Log_Level debug\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/monkey/monkey/pull/371. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Fluent-bit project uses monkey as a library.\nThis patch is to add `mk_list_add_before` to fix a bug of fluent-bit.\nhttps://github.com/fluent/fluent-bit/pull/5590\n\nI found that there is no way to add entry like below case.\n\n1. head\n2. head -> val1\n3. head -> val2 -> val1\n\nA point is 3, there is no API.\n\n1. mk_list_init(&head)\n2. mk_list_add(&val, &head)\n3. ??? 
\n\nWe can't use `mk_list_add_after(&val2, &head, &head)` since `head->next` and `head->prev` is same value `val1`.\nThen it calls `mk_list_add` the list will be `head->val1->val2`\nhttps://github.com/monkey/monkey/blob/992749ec1e8e2bc5b8cbcf1cf4fde67d297354cc/include/monkey/mk_core/mk_list.h#L73", + "codeSnippets": [ + "[INPUT]\r\n Name dummy\r\n dummy {\"log\":\"blahblah\",\"kubernetes\":{\"pod_name\":\"nginx\", \"namespace_name\":\"default\"}}\r\n\r\n[OUTPUT]\r\n Name loki\r\n auto_kubernetes_labels off\r\n labels pod=$kubernetes['pod_name']\r\n remove_keys kubernetes\r\n port 9200\r\n Log_Level debug", + "[INPUT]\r\n Name dummy\r\n dummy {\"log\":\"blahblah\",\"kubernetes\":{\"pod_name\":\"nginx\", \"namespace_name\":\"default\"}}\r\n\r\n[OUTPUT]\r\n Name loki\r\n auto_kubernetes_labels off\r\n labels pod=$kubernetes['pod_name']\r\n remove_keys kubernetes\r\n port 9200\r\n Log_Level debug", + "## Debug log", + "\"kubernetes\" field is removed.", + "## Valgrind output" + ] + } + }, + "metadata": { + "tags": [ + "fluent-bit", + "graduated", + "observability", + "troubleshoot", + "ok-to-test", + "ci-integration-docker-ok", + "docs-required" + ], + "cncfProjects": [ + "fluent-bit" + ], + "targetResourceKinds": [ + "Pod", + "Service", + "Namespace" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/fluent/fluent-bit/pull/5590", + "repo": "https://github.com/fluent/fluent-bit", + "pr": "https://github.com/monkey/monkey/pull/371" + }, + "reactions": 3, + "comments": 14, + "synthesizedBy": "regex", + "qualityScore": 69 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with fluent-bit installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:28:15.968Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/fluent-bit/fluent-bit-8279-filter-kubernetes-add-kubernetes-namespace-metadata.json b/solutions/cncf-generated/fluent-bit/fluent-bit-8279-filter-kubernetes-add-kubernetes-namespace-metadata.json new file mode 100644 index 00000000..b1ce6ac4 --- /dev/null +++ b/solutions/cncf-generated/fluent-bit/fluent-bit-8279-filter-kubernetes-add-kubernetes-namespace-metadata.json @@ -0,0 +1,87 @@ +{ + "version": "kc-mission-v1", + "name": "fluent-bit-8279-filter-kubernetes-add-kubernetes-namespace-metadata", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "fluent-bit: filter_kubernetes: add kubernetes_namespace metadata", + "description": "This provides the ability to have the filter_kubernetes plugin fetch namespace labels & annotations and add them to the record under a `kubernetes_namespace` key. \n\n- Added 3 new configuration options:\n - `kube_meta_namespace_cache_ttl` - `FLB_CONFIG_MAP_TIME` - the TTL for the namespace cache\n - `namespace_annotations` - `FLB_CONFIG_MAP_BOOL` - On if you want namespace annotations added to the record. Default: Off\n - `namespace_labels` - `FLB_CONFIG_MAP_BOOL` - On if you want namespace la", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This provides the ability to have the filter_kubernetes plugin fetch namespace labels & annotations and add them to the record under a `kubernetes_namespace` key. 
\n\n- Added 3 new configuration options" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n[FILTER]\r\n Name kubernetes\r\n Match kube*\r\n K8S-Logging.Parser Off\r\n K8S-Logging.Exclude Off\r\n Use_Kubelet On\r\n Buffer_Size 1MB\r\n Annotations Off\r\n namespace_labels On\r\n namespace_annotations On\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/fluent/fluent-bit-docs/pull/1273. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Documentation for https://github.com/fluent/fluent-bit/pull/8279 which adds Kubernetes namespace meta fetching.", + "codeSnippets": [ + "[FILTER]\r\n Name kubernetes\r\n Match kube*\r\n K8S-Logging.Parser Off\r\n K8S-Logging.Exclude Off\r\n Use_Kubelet On\r\n Buffer_Size 1MB\r\n Annotations Off\r\n namespace_labels On\r\n namespace_annotations On", + "[FILTER]\r\n Name kubernetes\r\n Match kube*\r\n K8S-Logging.Parser Off\r\n K8S-Logging.Exclude Off\r\n Use_Kubelet On\r\n Buffer_Size 1MB\r\n Annotations Off\r\n namespace_labels On\r\n namespace_annotations On", + "If this is a change to packaging of containers or native binaries then please confirm it works for all targets.\r\n\r\n- [ ] Run [local packaging test](./packaging/local-build-all.sh) showing all targets (including any new ones) build.\r\n- [ ] Set `ok-package-test` label to test for all targets (requires maintainer to do).\r\n\r\n**Documentation**\r\n\r\n- [X] Documentation required for this feature\r\n\r\nhttps://github.com/fluent/fluent-bit-docs/pull/1280\r\n\r\n**Backporting**\r\n\r\n- [ ] Backport to latest stable release.\r\n\r\n\r\n----\r\n\r\nFluent Bit is licensed under Apache 2.0, by submitting this pull request I understand that this code will be released under the terms of that license.\r\n\nDocumentation for https://github.com/fluent/fluent-bit/pull/8279 which adds Kubernetes namespace meta fetching. \nAdded a second commit for the following... thought it was probably a good idea to include at least one test 😄 :", + "Initial test run failed due to missing test file. I didn't catch the `.gitignore` ignores `*.log` :). Just pushed an amended 2nd commit with the missing file. 🤞 \n@ryanohnemus I wonder if we can add some integration tests around the K8S filter to help prove this as well in the \"real world\" :)\r\n\r\nThey run on GKE currently anyway: https://github.com/fluent/fluent-bit-ci/tree/main/tests\n@patrick-stephens I created https://github.com/fluent/fluent-bit-ci/pull/121, but this is checking only the currently available pod labels. I assume this PR needs to be merged in first before I can add the namespace labels check.\n> @patrick-stephens I created [fluent/fluent-bit-ci#121](https://github.com/fluent/fluent-bit-ci/pull/121), but this is checking only the currently available pod labels. I assume this PR needs to be merged in first before I can add the namespace labels check.\r\n\r\nWe probably will have to unless we add a conditional test that is skipped when the filter help does not show the new options. In which case we can merge the integration test first then re-run it for this PR to confirm all good, then a separate int test PR update to be added after this PR is merged. 
We can run int tests with the specific PR image from this PR as well to confirm it would pass.\n@patrick-stephens - just amended the last commit comment and force pushed for it to restart the tests here with the new ci\n@ryanohnemus thanks for your sterling effort on this - both on this PR and all the supporting changes for testing/docs. The changes here need a bit of time to review.\n@patrick-stephens I really appreciate the feedback and the help on getting the new k8s ci tests added. They are finally passing! 🙌 \r\n\r\nI will work on the extended CI checks for k8s that contains the namespace labels checks as well as some additional tests I think should be added along with them. \r\n\r\n> We can run int tests with the specific PR image from this PR as well to confirm it would pass.\r\n\r\nDo you have an example of how to run with the PR image? I assume I change the`FLUENTBIT_IMAGE_REPOSITORY` & `FLUENTBIT_IMAGE_TAG` envs, but not exactly sure where I get those details from the PR build's image?\n> Do you have an example of how to run with the PR image? I assume I change the`FLUENTBIT_IMAGE_REPOSITORY` & `FLUENTBIT_IMAGE_TAG` envs, but not exactly sure where I get those details from the PR build's image?\r\n\r\nThe integration tests already pass the details over: https://github.com/fluent/fluent-bit/blob/18e5eda4b644723fcfbe6a46524de8430f856fe5/.github/workflows/pr-integration-test.yaml#L48\r\nThe image is first built then passed to the workflow - it's a shame we have duplicate workflows.\n@patrick-stephens thanks again for the pointer. I've added extended tests here: https://github.com/fluent/fluent-bit-ci/pull/127 and tested against this PR's image. \r\n\r\nLet me know if there's more that is needed!\nApologies for all the empty force pushed commits, I was trying (and failing) to get a kubelet enabled CI test running. Thank you again @patrick-stephens for your help (and patience) in that! \r\n\r\n@edsiper as far as progressing this PR forward, is there any estimate/milestone this can be attached to? I'm looking forward to getting this across the finish line!\nThis feature is eagerly awaited in kube-logging/logging-operator#1148 😄 Anything we can do to move this forward?\n@ryanohnemus I think we can link out to tests proving this now? That will demonstrate both the current behaviour working and this new behaviour right? If you can link that with some example output as well to show @edsiper this is well tested that will help.\n@patrick-stephens @edsiper - yes, i added CI tests for the kubernetes_filter here: https://github.com/fluent/fluent-bit-ci/blob/main/tests/kubernetes-plugins/full.bats and we have an example of those passing (also attached to this PR via the checks) here: https://github.com/fluent/fluent-bit/actions/runs/7654984585/job/20860421190?pr=8279\n@ryanohnemus post the log contents as well for posterity as they'll expire eventually\n@patrick-stephens understood! \r\n\r\n---\r\n1. namespace_labels only test, where a label `this_is_a_namespace_label: true` exists. 
The filter config is:", + "And the output is:" + ] + } + }, + "metadata": { + "tags": [ + "fluent-bit", + "graduated", + "observability", + "troubleshoot", + "ok-to-test", + "ci-integration-docker-ok", + "ci-integration-test-ok", + "docs-required" + ], + "cncfProjects": [ + "fluent-bit" + ], + "targetResourceKinds": [ + "Pod", + "Namespace" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/fluent/fluent-bit/pull/8279", + "repo": "https://github.com/fluent/fluent-bit", + "pr": "https://github.com/fluent/fluent-bit-docs/pull/1273" + }, + "reactions": 4, + "comments": 20, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with fluent-bit installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:28:07.929Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/fluentd/fluentd-4185-in-tail-use-inode-for-key-of-tailwatcher-when-follow-inodes.json b/solutions/cncf-generated/fluentd/fluentd-4185-in-tail-use-inode-for-key-of-tailwatcher-when-follow-inodes.json new file mode 100644 index 00000000..ece76161 --- /dev/null +++ b/solutions/cncf-generated/fluentd/fluentd-4185-in-tail-use-inode-for-key-of-tailwatcher-when-follow-inodes.json @@ -0,0 +1,82 @@ +{ + "version": "kc-mission-v1", + "name": "fluentd-4185-in-tail-use-inode-for-key-of-tailwatcher-when-follow-inodes", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "fluentd: in_tail: Use inode for key of TailWatcher when follow_inodes", + "description": "**Which issue(s) this PR fixes**: \nPartially fixes #3614 (follow_inode true case)\n\n**What this PR does / why we need it**: \nThank you for reading to the great developer's!\nThis PR is fixed for unexpected file close after logs rotate in fluentd v1.16.1.\n\nI found the #3614 's reproducer.\nBefore applying this patch, fluentd cause unexpected file close after logs rotate every hour on my reproducer.\nAfter applying this patch, fluentd does not cause it on my reproducer.\nI have been running testing a l", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "**Which issue(s) this PR fixes**: \nPartially fixes #3614 (follow_inode true case)\n\n**What this PR does / why we need it**: \nThank you for reading to the great developer's!\nThis PR is fixed for unexpec" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n> Could you fix your name in the Author in your commit message?\r\n\r\nIt's resolved, thanks!\n> Fix #3614\r\n\r\nCould you add `partially` or something similar to this line in your commit message & the first comment of this PR?\r\nAs I described in #3614, it doesn't seem fix the issue on `follow_inode false` case.\nIt seems that CI stalls on all platforms.\n> > Fix #3614\r\n> \r\n> Could you add `partially` or something similar to this line in your commit message & the first comment of this PR? As I described i\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/fluent/fluentd/pull/4191. Review the changes to understand the solution." 
+ }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "**Which issue(s) this PR fixes**: \n\n**What this PR does / why we need it**:\n~~Add validation to make sure detach_watcher is detaching expected watcher. This can avoid unexpectedly detach new watcher created for new log file and lead to log stuck transiently.~~\n\nAdd log to check that detaching inode is the same as the detaching TailWatcher's inode when enabling `follow_inodes`.\n \nNote: If they do not match, canceling the detach (by adding `return`) may prevent an incorrect detach.\nSince #4208 will prevent an incorrect detach, we will only add the warning log in this PR for now.\n\n**Docs Changes**:\nN/A\n\n**Release Note**: \n~~Fix transient log stuck in in_tail when log file rotated and follow_inodes is enabled~~\nSame as the title.", + "codeSnippets": [ + "> Could you fix your name in the Author in your commit message?\r\n\r\nIt's resolved, thanks!\n> Fix #3614\r\n\r\nCould you add `partially` or something similar to this line in your commit message & the first comment of this PR?\r\nAs I described in #3614, it doesn't seem fix the issue on `follow_inode false` case.\nIt seems that CI stalls on all platforms.\n> > Fix #3614\r\n> \r\n> Could you add `partially` or something similar to this line in your commit message & the first comment of this PR? As I described i", + "> Could you fix your name in the Author in your commit message?\r\n\r\nIt's resolved, thanks!\n> Fix #3614\r\n\r\nCould you add `partially` or something similar to this line in your commit message & the first comment of this PR?\r\nAs I described in #3614, it doesn't seem fix the issue on `follow_inode false` case.\nIt seems that CI stalls on all platforms.\n> > Fix #3614\r\n> \r\n> Could you add `partially` or something similar to this line in your commit message & the first comment of this PR? 
As I described in #3614, it doesn't seem fix the issue on `follow_inode false` case.\r\n\r\nThank you for commenting.\r\nKatsuya will handle your comment next week.\r\n\r\nBy the way, please let me explain why we didn't take care of `follow_inode false` case.\r\n\r\nTo fix this unexpected file close issue, we have to modify the tail plugin to create a tail list with hash values **generated by inode numbers**.\r\nBut users who use `follow_inode false` might want the plugin to create a tail list with hash values **generated by file names**.\r\nThat's why we hesitated to take care of `follow_inode false` case in this commit.\r\n\r\nIn our understanding, `follow_inode false` is just for keeping the compatibility with old verions of the tail plugin.\r\nhttps://docs.fluentd.org/input/tail#follow_inodes explains that `follow_inode false` can cause \"read rotated files duplicately\" problem.\r\nSo every user should use `follow_inode true` unless there is some particular reason.\r\n\r\nPlease let me know if our understanding is wrong.\r\nWe can change our code if needed.\n> It seems that CI stalls on all platforms.", + "The cause is `test_should_close_watcher_after_rotate_wait` doesn't follow the in_tail's modification.\r\nIn addition, your patch seems to be missing a fix.", + "In `follow_inodes true` case:" + ] + } + }, + "metadata": { + "tags": [ + "fluentd", + "graduated", + "observability", + "troubleshoot", + "pending" + ], + "cncfProjects": [ + "fluentd" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "advanced", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/fluent/fluentd/pull/4185", + "repo": "https://github.com/fluent/fluentd", + "pr": "https://github.com/fluent/fluentd/pull/4191" + }, + "reactions": 1, + "comments": 71, + "synthesizedBy": "regex", + "qualityScore": 60 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with fluentd installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:27:53.727Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/flux/flux-3675-feat-add-reconcile-source-chart-command.json b/solutions/cncf-generated/flux/flux-3675-feat-add-reconcile-source-chart-command.json new file mode 100644 index 00000000..cf29e02c --- /dev/null +++ b/solutions/cncf-generated/flux/flux-3675-feat-add-reconcile-source-chart-command.json @@ -0,0 +1,80 @@ +{ + "version": "kc-mission-v1", + "name": "flux-3675-feat-add-reconcile-source-chart-command", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "flux: feat: add reconcile source chart command", + "description": "## Current situation\nReconciling helm charts which are configured as semver are not reconciled via reconcile hr. 
\nThis is because the HelmChart does not get annotated.\n\nMore details are found here:\n* https://github.com/fluxcd/flux2/pull/3660\n* https://github.com/fluxcd/flux2/issues/3656\n\n## Proposal\nThis pr adds a new command `reconcile source chart` which also supports the --source flag for the referencing repository.\n`reconcile hr --with-source` now supports annotating the entire source tree f", + "type": "deploy", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "## Current situation\nReconciling helm charts which are configured as semver are not reconciled via reconcile hr. \nThis is because the HelmChart does not get annotated.\n\nMore details are found here:\n* " + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\napiVersion: helm.toolkit.fluxcd.io/v2beta1\r\nkind: HelmRelease\r\nmetadata:\r\n name: my-release\r\nspec:\r\n chart:\r\n spec:\r\n chart: mychart\r\n interval: 15m0s\r\n sourceRef:\r\n kind: HelmRepository\r\n name: oci-repository\r\n namespace: flux-system\r\n version: '*'\r\n interval: 15m0s\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/fluxcd/flux2/pull/3660. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "## Current situation\n`flux reconcile hr my-release --with-source` does not update the HelmChart (installed from an oci registry) if the version is a semver expression (like in this example `*`) and a new version of the chart was released meanwhile.\nMeaning at the moment one has either to await the configured interval or manually annotate the referenced HelmChart in order to receive the new version.\nI would expect that if I manually trigger a reconcile I get the latest version possible which is matched by semver which is not the case currently.\n\n```yaml\napiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\n name: my-release\nspec:\n chart:\n spec:\n chart: mychart\n interval: 15m0s\n sourceRef:\n kind: HelmRepository\n name: oci-repository\n namespace: flux-system\n version: '*'\n interval: 15m0s\n```\n## Proposal\n`flux reconcile hr my-release --with-source` should actually annotate the referenced helm chart as well.\n\nThis pr implements just that:\n```\ngo run . 
reconcile hr my-release --with-source\n► annotating HelmRepository oci-repository in default namespace\n✔ HelmRepository annotated\n◎ waiting for HelmRepository reconciliation\n✔ Helm repository is ready\n► annotating HelmRelease my-release in default namespace\n✔ HelmRelease annotated\n◎ waiting for HelmRelease reconciliation\n✔ applied revision 4.0.11+1\n► annotating HelmChart default-my-release in flux-system namespace\n✔ HelmChart annotated\n◎ waiting for HelmChart reconciliati", + "codeSnippets": [ + "apiVersion: helm.toolkit.fluxcd.io/v2beta1\r\nkind: HelmRelease\r\nmetadata:\r\n name: my-release\r\nspec:\r\n chart:\r\n spec:\r\n chart: mychart\r\n interval: 15m0s\r\n sourceRef:\r\n kind: HelmRepository\r\n name: oci-repository\r\n namespace: flux-system\r\n version: '*'\r\n interval: 15m0s", + "apiVersion: helm.toolkit.fluxcd.io/v2beta1\r\nkind: HelmRelease\r\nmetadata:\r\n name: my-release\r\nspec:\r\n chart:\r\n spec:\r\n chart: mychart\r\n interval: 15m0s\r\n sourceRef:\r\n kind: HelmRepository\r\n name: oci-repository\r\n namespace: flux-system\r\n version: '*'\r\n interval: 15m0s", + "go run . reconcile hr my-release --with-source\r\n► annotating HelmRepository oci-repository in default namespace\r\n✔ HelmRepository annotated\r\n◎ waiting for HelmRepository reconciliation\r\n✔ Helm repository is ready\r\n► annotating HelmRelease my-release in default namespace\r\n✔ HelmRelease annotated\r\n◎ waiting for HelmRelease reconciliation\r\n✔ applied revision 4.0.11+1\r\n► annotating HelmChart default-my-release in flux-system namespace\r\n✔ HelmChart annotated\r\n◎ waiting for HelmChart reconciliation\r\n✔ fetched revision 4.0.12+1" + ] + } + }, + "metadata": { + "tags": [ + "flux", + "graduated", + "app-definition", + "deploy", + "enhancement", + "area-helm" + ], + "cncfProjects": [ + "flux" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "issueTypes": [ + "deploy" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/fluxcd/flux2/pull/3675", + "repo": "https://github.com/fluxcd/flux2", + "pr": "https://github.com/fluxcd/flux2/pull/3660" + }, + "reactions": 3, + "comments": 3, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with flux installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:25:14.671Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/grpc/grpc-22062-add-compatibility-for-python-s-eventlet-library.json b/solutions/cncf-generated/grpc/grpc-22062-add-compatibility-for-python-s-eventlet-library.json new file mode 100644 index 00000000..d30b5940 --- /dev/null +++ b/solutions/cncf-generated/grpc/grpc-22062-add-compatibility-for-python-s-eventlet-library.json @@ -0,0 +1,82 @@ +{ + "version": "kc-mission-v1", + "name": "grpc-22062-add-compatibility-for-python-s-eventlet-library", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "grpc: Add compatibility for Python's Eventlet library", + "description": "Trying to use the gRPC library in a Python program that uses the\nEventlet library doesn't work as expected, and we end up with deadlocks\nand blocked threads.\n\nThis patch adds a custom eventlet IO manager to provide compatibility\nbetween the Eventlet and gRPC libraries.\n\nThe code includes 2 workarounds for existing eventlet bugs, but this\ncode is compatible with the proposed PRs to solve them, so there should\nbe no problem if/when implements a fix for them:\n\n - https://github.com/eventlet/eventle", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Trying to use the gRPC library in a Python program that uses the\nEventlet library doesn't work as expected, and we end up with deadlocks\nand blocked threads.\n\nThis patch adds a custom eventlet IO mana" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\npython tools/run_tests/run_tests.py --runs_per_test 1000 --language python --iomgr_platform eventlet\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/grpc/grpc/pull/22270. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "When working on the [eventlet Python I/O manager](https://github.com/grpc/grpc/pull/22062) I found some deficiencies when running Python tests with `run_tests.py`:\n\n- The `--forever` parameter doesn't work as expected, because there are false detection of changed source files because we write coverage and results in watched directories.\n\n- When passing `--runs_per_test` we cannot tell how many runs actually failed, we can only tell how many tests failed, but multiple tests may have failed in a single run.\n\n- Flaky tests in Python Custom I/O managers can only be disabled, we cannot do a single retry for them.\n\n- Passing `--runs_per_tests=1000` with `--use_docker` will result in all tests failing after a while because the container runs out of disk space.\n\nThis PR includes the changes I did to resolve these and be able to evaluate the impact that running the eventlet tests in the CI would have.\n\n@veblush", + "codeSnippets": [ + "python tools/run_tests/run_tests.py --runs_per_test 1000 --language python --iomgr_platform eventlet", + "python tools/run_tests/run_tests.py --runs_per_test 1000 --language python --iomgr_platform eventlet" + ] + } + }, + "metadata": { + "tags": [ + "grpc", + "incubating", + "networking", + "troubleshoot", + "kind-enhancement", + "lang-python", + "release-notes--yes", + "kind-experimental-api", + "disposition-stale" + ], + "cncfProjects": [ + "grpc" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/grpc/grpc/pull/22062", + "repo": "https://github.com/grpc/grpc", + "pr": "https://github.com/grpc/grpc/pull/22270" + }, + "reactions": 6, + "comments": 11, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with grpc installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:35:57.140Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/grpc/grpc-22594-fix-generating-method-output-with-nested-resource.json b/solutions/cncf-generated/grpc/grpc-22594-fix-generating-method-output-with-nested-resource.json new file mode 100644 index 00000000..7d6dd998 --- /dev/null +++ b/solutions/cncf-generated/grpc/grpc-22594-fix-generating-method-output-with-nested-resource.json @@ -0,0 +1,90 @@ +{ + "version": "kc-mission-v1", + "name": "grpc-22594-fix-generating-method-output-with-nested-resource", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "grpc: Fix generating method output with nested resource", + "description": "Fixes https://github.com/grpc/grpc/issues/21561\n\n> The grpc_ruby_plugin protoc plugin has a bug in generating Ruby output for proto files with nested resources and using the ruby_package option.\n> \n> The plugin works fine when using either nested resources or ruby_package, but not both.\n> https://github.com/grpc/grpc/issues/21561\n\nFix to use `descriptor->full_name()` removed package name instead of `descriptor->name()` when using the `ruby_package` option.\n\n@apolcyn", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Fixes https://github.com/grpc/grpc/issues/21561\n\n> The grpc_ruby_plugin protoc plugin has a bug in generating Ruby output for proto files with nested resources and using the ruby_package option.\n> \n> " + }, + { + "title": "In #22594, we fixed an issue with nested message. Example, let's say we have ...", + "description": "In #22594, we fixed an issue with nested message. Example, let's say we have a proto message" + }, + { + "title": "However, this breaks the case if the message is in a _different_ proto packag...", + "description": "However, this breaks the case if the message is in a _different_ proto package. Example, let's say we now have an imported proto file:" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\npackage grpc.testing;\r\noption ruby_package = \"A::Other\";\r\nmessage Foo {\r\n message Bar { }\r\n}\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/grpc/grpc/pull/23501. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "**Summary**:\n\nFixes an issue where the generated code is incorrect, if the message is being imported from another proto file with a _different_ proto package, _and_ the proto has a custom `option ruby_package = \"...\"` value.\n\nThis is a regression introduced by #22594 in the `v1.30.x` release.\n\n**Details**:\n\n1. In #22594, we fixed an issue with nested message. Example, let's say we have a proto message\n```\npackage grpc.testing;\noption ruby_package = \"A::Other\";\nmessage Foo {\n message Bar { }\n}\n```\nPrior to #22594, the generated code will only have `::Bar` instead of `::Foo::Bar` because we are using only `descriptor->name()` in the code generator. 
So for #22594, the fix's strategy is to start with `descriptor->full_name()`, which is `grpc.testing.Foo.Bar`, and then remove `package`, which is `grpc.testing`, so we end up with `Foo.Bar`, and then we can add the `option ruby_package = \"...\"`, and end up with `A::Other::Foo::Bar`. This is correct.\n\n2. However, this breaks the case if the message is in a _different_ proto package. Example, let's say we now have an imported proto file:\n```\npackage grpc.foo;\noption ruby_package = \"B::Other\";\nmessage Bar {\n message Baz { }\n}\n```\nAnd let's say, now, we are referring to messages in this type in our original file as the fully qualified name `grpc.foo.Bar.Baz`. \n\nThe problem is:\n- we start with `descriptor->full_name()` which is `grpc.foo.Bar.Baz`\n- and then we attempt to _remove_ `package` from the beginn", + "codeSnippets": [ + "package grpc.testing;\r\noption ruby_package = \"A::Other\";\r\nmessage Foo {\r\n message Bar { }\r\n}", + "package grpc.testing;\r\noption ruby_package = \"A::Other\";\r\nmessage Foo {\r\n message Bar { }\r\n}", + "package grpc.foo;\r\noption ruby_package = \"B::Other\";\r\nmessage Bar {\r\n message Baz { }\r\n}", + "$ make grpc_ruby_plugin -j8\r\n$ make $(pwd)/libs/opt/protobuf/libprotobuf.a -j8\r\n$ cd src/ruby\r\n$ CONFIG=opt rake\r\n$ cd ../..\r\n$ CONFIG=opt tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh" + ] + } + }, + "metadata": { + "tags": [ + "grpc", + "incubating", + "networking", + "troubleshoot", + "lang-ruby", + "release-notes--yes", + "disposition-never-stale" + ], + "cncfProjects": [ + "grpc" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/grpc/grpc/pull/22594", + "repo": "https://github.com/grpc/grpc", + "pr": "https://github.com/grpc/grpc/pull/23501" + }, + "reactions": 8, + "comments": 2, + "synthesizedBy": "regex", + "qualityScore": 70 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with grpc installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:35:51.330Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/grpc/grpc-27121-drop-msse4-compiler-flag.json b/solutions/cncf-generated/grpc/grpc-27121-drop-msse4-compiler-flag.json new file mode 100644 index 00000000..d6a832a1 --- /dev/null +++ b/solutions/cncf-generated/grpc/grpc-27121-drop-msse4-compiler-flag.json @@ -0,0 +1,83 @@ +{ + "version": "kc-mission-v1", + "name": "grpc-27121-drop-msse4-compiler-flag", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "grpc: Drop -msse4 compiler flag", + "description": "Older CPUs that do not have SSE4.1 would crash with the Ruby native gem due to an illegal instruction exception.\n\nThe Abseil random library isn't being used at the moment (https://github.com/grpc/grpc/pull/26476), and there's no reason gRPC needs to force SSE4.1 instructions on all platforms at the moment. 
There are other hardware-specific issues that need to be ironed out for this to work: https://github.com/grpc/grpc/pull/26479\n\nWhen the `-msse4` compiler flag was enabled, the Abseil code star", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Older CPUs that do not have SSE4.1 would crash with the Ruby native gem due to an illegal instruction exception.\n\nThe Abseil random library isn't being used at the moment (https://github.com/grpc/grpc" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n$ elfx86exts abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.o\r\nMODE64 (ret)\r\nCMOV (cmovne)\r\nSSE2 (movdqa)\r\nSSE41 (pinsrb)\r\nSSE1 (movaps)\r\nCPU Generation: Penryn\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/abseil/abseil-cpp/pull/1015. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "`immintrin.h` is the de-factor standard header for clang and GCC to include Intel intrinsics. Using this header avoids requiring the compiler to use the `-maes` and `-msse4.1` compiler options on systems that may not have AES or SSE instruction support.\n\nclang: As seen in https://github.com/llvm-mirror/clang/blob/master/lib/Headers/immintrin.h,\nspecific intrinsic header files are conditionally included depending on whether the feature is available.\n\ngcc: As seen in https://github.com/gcc-mirror/gcc/blob/master/gcc/config/i386/immintrin.h, gcc includes all intrinsic header files, but each individual file guards against the feature not being available.\n\nThis came out of an investigation in https://github.com/grpc/grpc/pull/27121.", + "codeSnippets": [ + "$ elfx86exts abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.o\r\nMODE64 (ret)\r\nCMOV (cmovne)\r\nSSE2 (movdqa)\r\nSSE41 (pinsrb)\r\nSSE1 (movaps)\r\nCPU Generation: Penryn", + "$ elfx86exts abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.o\r\nMODE64 (ret)\r\nCMOV (cmovne)\r\nSSE2 (movdqa)\r\nSSE41 (pinsrb)\r\nSSE1 (movaps)\r\nCPU Generation: Penryn", + "This sounds like `-maes` **might** be necessary, but `-msse4` could be dropped. What do you think?\nI'm not sure what is defining `ABSL_ARCH_X86_64` because it's not being defined in my build. I added some garbage in the `elif` block, and gRPC compiled fine.\nOh, it looks like abseil defines this: https://github.com/abseil/abseil-cpp/blob/997aaf3a28308eba1b9156aa35ab7bca9688e9f6/absl/random/internal/platform.h#L63\r\n\nIt looks gcc 5 and up don't need these compiler flags because they are only enabled when specific flags are in use (https://github.com/gcc-mirror/gcc/commit/97db2bf7fb10e7eb2e8224e0471b56976f133843). 
For example:", + "`emmintrin.h` does this as well:" + ] + } + }, + "metadata": { + "tags": [ + "grpc", + "incubating", + "networking", + "troubleshoot", + "lang-core", + "area-build", + "release-notes--no", + "imported" + ], + "cncfProjects": [ + "grpc" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/grpc/grpc/pull/27121", + "repo": "https://github.com/grpc/grpc", + "pr": "https://github.com/abseil/abseil-cpp/pull/1015" + }, + "reactions": 8, + "comments": 18, + "synthesizedBy": "regex", + "qualityScore": 64 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with grpc installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:35:53.653Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/grpc/grpc-8601-node-electron-build.json b/solutions/cncf-generated/grpc/grpc-8601-node-electron-build.json new file mode 100644 index 00000000..de4276c8 --- /dev/null +++ b/solutions/cncf-generated/grpc/grpc-8601-node-electron-build.json @@ -0,0 +1,79 @@ +{ + "version": "kc-mission-v1", + "name": "grpc-8601-node-electron-build", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "grpc: Node electron build", + "description": "This allows the Node library to be built for Electron by following [the existing procedure on the Electron website](http://electron.atom.io/docs/tutorial/using-native-node-modules/). It also adds scripts for building artifacts for Electron and running the tests on Electron. This fixes #6138 and maybe #8166.\n\nElectron does not work with the libuv endpoint implementation, so we will continue to need the non-uv implementation. So, I also refactored the uv/non-uv split in the Node extension code.\n\nT", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This allows the Node library to be built for Electron by following [the existing procedure on the Electron website](http://electron.atom.io/docs/tutorial/using-native-node-modules/). It also adds scri" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n../src/boringssl/err_data.c:17:10: fatal error: 'openssl/base.h' file not found\n#include \n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "It looks like you installed gRPC as a dependency for your own app, and you're running `npm test` for that app. 
If that's accurate, you should just do what you normally do to run your tests on Electron.\n\nIf you're asking how to run the gRPC tests in the gRPC repository using electron, you should be able to do something like this (assuming you have electron installed globally)\n\n```sh\nelectron ./node_modules/.bin/electron-mocha src/node/test\n```", + "codeSnippets": [ + "../src/boringssl/err_data.c:17:10: fatal error: 'openssl/base.h' file not found\n#include ", + "../src/boringssl/err_data.c:17:10: fatal error: 'openssl/base.h' file not found\n#include ", + "npm run electron-build -- --target=1.4.6", + "./tools/run_tests/build_node_electron.sh 1.4.6", + "electron ./node_modules/.bin/electron-mocha src/node/test" + ] + } + }, + "metadata": { + "tags": [ + "grpc", + "incubating", + "networking", + "troubleshoot", + "lang-node", + "cla--yes" + ], + "cncfProjects": [ + "grpc" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "advanced", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/grpc/grpc/pull/8601", + "repo": "https://github.com/grpc/grpc" + }, + "reactions": 7, + "comments": 33, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with grpc installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:35:55.147Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/harbor/harbor-17932-aws-auth-use-default-creds-when-none-are-supplied.json b/solutions/cncf-generated/harbor/harbor-17932-aws-auth-use-default-creds-when-none-are-supplied.json new file mode 100644 index 00000000..3b4e58c3 --- /dev/null +++ b/solutions/cncf-generated/harbor/harbor-17932-aws-auth-use-default-creds-when-none-are-supplied.json @@ -0,0 +1,79 @@ +{ + "version": "kc-mission-v1", + "name": "harbor-17932-aws-auth-use-default-creds-when-none-are-supplied", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "harbor: aws auth use default creds when none are supplied", + "description": "# Comprehensive Summary of your change\n\nHarbor should not assume that if AWS Credentials are not defined that the user wants to use `ec2rolecreds`. Rather, it should allow the AWS SDK to resolve credentials by passing `nil` Credentials:\n\nhttps://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials\n\nThis _should_ be backwards compatible because the defualt provider chain does look for: `4. If your application is running on an Amazon EC2 instance, IAM role", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "# Comprehensive Summary of your change\n\nHarbor should not assume that if AWS Credentials are not defined that the user wants to use `ec2rolecreds`. 
Rather, it should allow the AWS SDK to resolve cred" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n$ export AWS_PROFILE=my-profile-name\r\n$ export AWS_SDK_LOAD_CONFIG=true\r\n$ ./run.sh https://123456.dkr.ecr.us-east-2.amazonaws.com\r\n2022-12-08T13:21:24-05:00 [INFO] [/pkg/reg/adapter/native/adapter.go:36]: the factory for adapter docker-registry registered\r\n2022-12-08T13:21:24-05:00 [INFO] [/pkg/reg/adapter/awsecr/adapter.go:44]: the factory for adapter aws-ecr registered\r\n2022-12-08T13:21:30-05:00 [FATAL] [/awssecrettest/awssecrettest.go:38]: EC2RoleRequestError: no EC2 instance role found\r\ncau\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "A little tough to unit test because you need actual creds to run through the SDK. But I purpose-built a couple test binaries in branches on my fork. This run demonstrates that Before, the `Shared credentials file` is not loaded per AWS' default credential chain, but After it is.\n\nI also tested Good/Bad static creds after the change and there are no regressions there.\n\n## Before Change:\n\nTest source: https://github.com/caleblloyd/harbor/tree/aws-test-before-change/src/awssecrettest\n\n### Auto Detect\n\n```\n$ export AWS_PROFILE=my-profile-name\n$ export AWS_SDK_LOAD_CONFIG=true\n$ ./run.sh https://123456.dkr.ecr.us-east-2.amazonaws.com\n2022-12-08T13:21:24-05:00 [INFO] [/pkg/reg/adapter/native/adapter.go:36]: the factory for adapter docker-registry registered\n2022-12-08T13:21:24-05:00 [INFO] [/pkg/reg/adapter/awsecr/adapter.go:44]: the factory for adapter aws-ecr registered\n2022-12-08T13:21:30-05:00 [FATAL] [/awssecrettest/awssecrettest.go:38]: EC2RoleRequestError: no EC2 instance role found\ncaused by: RequestError: send request failed\ncaused by: Get \"http://169.254.169.254/latest/meta-data/iam/security-credentials/\": dial tcp 169.254.169.254:80: connect: no route to host\n```\n\n## After Change:\n\nTest source: https://github.com/caleblloyd/harbor/tree/aws-test-after-change/src/awssecrettest\n\n### Auto Detect\n\n```\n$ export AWS_PROFILE=my-profile-name\n$ export AWS_SDK_LOAD_CONFIG=true\n$ ./run.sh https://123456.dkr.ecr.us-east-2.amazonaws.com\n2022-12-08T13:18:08-05:00 [INFO] [/pkg/reg/ada", + "codeSnippets": [ + "$ export AWS_PROFILE=my-profile-name\r\n$ export AWS_SDK_LOAD_CONFIG=true\r\n$ ./run.sh https://123456.dkr.ecr.us-east-2.amazonaws.com\r\n2022-12-08T13:21:24-05:00 [INFO] [/pkg/reg/adapter/native/adapter.go:36]: the factory for adapter docker-registry registered\r\n2022-12-08T13:21:24-05:00 [INFO] [/pkg/reg/adapter/awsecr/adapter.go:44]: the factory for adapter aws-ecr registered\r\n2022-12-08T13:21:30-05:00 [FATAL] [/awssecrettest/awssecrettest.go:38]: EC2RoleRequestError: no EC2 instance role found\r\ncau", + "$ export AWS_PROFILE=my-profile-name\r\n$ export AWS_SDK_LOAD_CONFIG=true\r\n$ ./run.sh https://123456.dkr.ecr.us-east-2.amazonaws.com\r\n2022-12-08T13:21:24-05:00 [INFO] [/pkg/reg/adapter/native/adapter.go:36]: the factory for adapter docker-registry registered\r\n2022-12-08T13:21:24-05:00 [INFO] [/pkg/reg/adapter/awsecr/adapter.go:44]: the factory for adapter aws-ecr registered\r\n2022-12-08T13:21:30-05:00 [FATAL] [/awssecrettest/awssecrettest.go:38]: EC2RoleRequestError: no EC2 instance role found\r\ncaused by: RequestError: send request failed\r\ncaused by: Get 
\"http://169.254.169.254/latest/meta-data/iam/security-credentials/\": dial tcp 169.254.169.254:80: connect: no route to host", + "$ export AWS_PROFILE=my-profile-name\r\n$ export AWS_SDK_LOAD_CONFIG=true\r\n$ ./run.sh https://123456.dkr.ecr.us-east-2.amazonaws.com\r\n2022-12-08T13:18:08-05:00 [INFO] [/pkg/reg/adapter/native/adapter.go:36]: the factory for adapter docker-registry registered\r\n2022-12-08T13:18:08-05:00 [INFO] [/pkg/reg/adapter/awsecr/adapter.go:44]: the factory for adapter aws-ecr registered\r\n", + "$ ./run.sh https://123456.dkr.ecr.us-east-2.amazonaws.com \"bad\" \"bad\"\r\n2022-12-08T13:32:40-05:00 [INFO] [/pkg/reg/adapter/native/adapter.go:36]: the factory for adapter docker-registry registered\r\n2022-12-08T13:32:40-05:00 [INFO] [/pkg/reg/adapter/awsecr/adapter.go:44]: the factory for adapter aws-ecr registered\r\n2022-12-08T13:32:41-05:00 [FATAL] [/awssecrettest/awssecrettest.go:38]: UnrecognizedClientException: The security token included in the request is invalid.\r\n status code: 400, request id: 7c3a3f19-95c8-4cdd-9b39-a1cb07edab2b\r\nexit status 1", + "$ ./run.sh https://123456.dkr.ecr.us-east-2.amazonaws.com \"xxxxxx\" \"xxxxxx\"\r\n2022-12-08T13:35:15-05:00 [INFO] [/pkg/reg/adapter/native/adapter.go:36]: the factory for adapter docker-registry registered\r\n2022-12-08T13:35:15-05:00 [INFO] [/pkg/reg/adapter/awsecr/adapter.go:44]: the factory for adapter aws-ecr registered\r\n" + ] + } + }, + "metadata": { + "tags": [ + "harbor", + "graduated", + "security", + "troubleshoot", + "release-note-enhancement", + "target-2-8-0" + ], + "cncfProjects": [ + "harbor" + ], + "targetResourceKinds": [ + "Role" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/goharbor/harbor/pull/17932", + "repo": "https://github.com/goharbor/harbor" + }, + "reactions": 3, + "comments": 11, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with harbor installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:25:51.263Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/headlamp/headlamp-2231-frontend-introduce-react-query-and-v2-api.json b/solutions/cncf-generated/headlamp/headlamp-2231-frontend-introduce-react-query-and-v2-api.json new file mode 100644 index 00000000..00552e21 --- /dev/null +++ b/solutions/cncf-generated/headlamp/headlamp-2231-frontend-introduce-react-query-and-v2-api.json @@ -0,0 +1,80 @@ +{ + "version": "kc-mission-v1", + "name": "headlamp-2231-frontend-introduce-react-query-and-v2-api", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "headlamp: frontend: Introduce react-query and v2 API", + "description": "This PR introduces react-query as the library to perform and coordinate requests to the backend.\nIt brings nice quality of life improvements like caching, deduplicating requests, error handling, convenient react hooks.\n\nFor KubeObject classes, new methods were added: useQuery (alternative to useGet), useListQuery (alternative to useList). The old methods are left as-is (marked as deprecated) for compatibility with plugins. \n\nSome requests were not converted to limit the scope of this PR. 
Things ", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This PR introduces react-query as the library to perform and coordinate requests to the backend.\nIt brings nice quality of life improvements like caching, deduplicating requests, error handling, conve" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nindex-B1wInVTN.js:830 Error: Error: Unreachable\r\n at backendFetch (index-B1wInVTN.js:830:27146)\r\n at async clusterFetch (index-B1wInVTN.js:830:27624)\r\n at async Promise.any (/tmp/.mount_Headlae2jo9U/resources/frontend/index 1)\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kubernetes-sigs/headlamp/pull/2181. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "> At the moment I can't see any of the extra pieces of data used. The most common usage is like this:\n> \n> ```ts\n> const [pods, podsFetchError] = Pod.useList({\n> namespace,\n> labelSelector: getPodsSelectorFilter(service),\n> });\n> ```\n> \n> Which is simpler to use \n\nYes, right now in most places we're just using result and error. \nBut now we use react-query and there's more information that we can (and should) use.\n\nFor example, now that we have caching, a common feature that improves UX is some kind of loading indicator that appears when data is refetched (while cached data is displayed). \n\nThere's no way to represent this with just `[data,error]` tuple. Currently we use `null` to represent that data is loading but that is not a good practice (while appearing simple, sometimes `null` might be a valid response from backend). A much better solution is having a `status` variable. \n\nIf you look at the current type definition of useList you'll see \n\n```\n[any[], ApiError | null, (items: any[]) => void, (err: ApiError | null) => void];\n```\n\nwhich is not always obvious, for example guessing from just the type what the last item does is not clear.\nHaving a well documented object with clearly named properties is much easier to understand and to work with.\n\n> The queryParams is redundant here, because \"Selector\" is already signifying that it's a selector.\n\nQuery parameters is a set of properties that is separate from the other parameters. 
Just like it is separated in Kubernet", + "codeSnippets": [ + "index-B1wInVTN.js:830 Error: Error: Unreachable\r\n at backendFetch (index-B1wInVTN.js:830:27146)\r\n at async clusterFetch (index-B1wInVTN.js:830:27624)\r\n at async Promise.any (/tmp/.mount_Headlae2jo9U/resources/frontend/index 1)", + "index-B1wInVTN.js:830 Error: Error: Unreachable\r\n at backendFetch (index-B1wInVTN.js:830:27146)\r\n at async clusterFetch (index-B1wInVTN.js:830:27624)\r\n at async Promise.any (/tmp/.mount_Headlae2jo9U/resources/frontend/index 1)", + "TypeError: Cannot read properties of undefined (reading 'pluralName')\r\n at xe (index-B1wInVTN.js:1128:19944)\r\n at index-B1wInVTN.js:1128:20388\r\n at Array.map ()\r\n at Overview (index-B1wInVTN.js:1128:20235)\r\n at um (vendor-mui-BWZhF6T-.js:38:17018)\r\n at nx (vendor-mui-BWZhF6T-.js:40:44058)\r\n at Jb (vendor-mui-BWZhF6T-.js:40:39790)\r\n at YR (vendor-mui-BWZhF6T-.js:40:39718)\r\n at Hc (vendor-mui-BWZhF6T-.js:40:39570)\r\n at Zp (vendor-mui-BWZhF6T-.js:40:35934)\r\njp @ vendor-mui-BWZhF6T-.js:40", + "> TypeError: Cannot read properties of undefined (reading 'pluralName')\r\n> at xe (index-B1wInVTN.js:1128:19944)\r\n> at index-B1wInVTN.js:1128:20388\r\n> at Array.map ()\r\n> at Overview (index-B1wInVTN.js:1128:20235)\r\n> at um (vendor-mui-BWZhF6T-.js:38:17018)\r\n> at nx (vendor-mui-BWZhF6T-.js:40:44058)\r\n> at Jb (vendor-mui-BWZhF6T-.js:40:39790)\r\n> at YR (vendor-mui-BWZhF6T-.js:40:39718)\r\n> at Hc (vendor-mui-BWZhF6T-.js:40:39570)\r\n> at Zp (vendor-mui-BWZhF6T-.js:40:35934)\r\n> jp @ vendor-mui-BWZhF6T-.js:40\r\n>", + "New use of useListQuery." + ] + } + }, + "metadata": { + "tags": [ + "headlamp", + "sandbox", + "observability", + "troubleshoot" + ], + "cncfProjects": [ + "headlamp" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/kubernetes-sigs/headlamp/pull/2231", + "repo": "https://github.com/kubernetes-sigs/headlamp", + "pr": "https://github.com/kubernetes-sigs/headlamp/pull/2181" + }, + "reactions": 2, + "comments": 29, + "synthesizedBy": "regex", + "qualityScore": 65 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with headlamp installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:40:24.533Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-10603-trigger-hook-delete-policy-after-log-retrieval-for-helm-test.json b/solutions/cncf-generated/helm/helm-10603-trigger-hook-delete-policy-after-log-retrieval-for-helm-test.json new file mode 100644 index 00000000..c1ab9904 --- /dev/null +++ b/solutions/cncf-generated/helm/helm-10603-trigger-hook-delete-policy-after-log-retrieval-for-helm-test.json @@ -0,0 +1,84 @@ +{ + "version": "kc-mission-v1", + "name": "helm-10603-trigger-hook-delete-policy-after-log-retrieval-for-helm-test", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "helm: Trigger hook-delete-policy after log retrieval for helm test", + "description": "**What this PR does / why we need it**:\n\nIf we want to run `helm test --logs`, this results in an error like:\n````\n$ helm --namespace test validation --logs\nNAME: validation\nLAST DEPLOYED: Sat Jan 22 22:43:19 2022\nNAMESPACE: test\nSTATUS: deployed\nREVISION: 2\nTEST SUITE: test-secrets-store\nLast Started: Sat Jan 22 22:43:25 2022\nLast Completed: Sat Jan 22 22:43:27 2022\nPhase: Succeeded\n\nError: unable to get pod logs for test-secrets-store: pods \"test-secrets-store\" not found\n````\n\nW", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "**What this PR does / why we need it**:\n\nIf we want to run `helm test --logs`, this results in an error like:\n````\n$ helm --namespace test validation --logs\nNAME: validation\nLAST DEPLOYED: Sat Jan 22 " + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n$ helm --namespace test validation --logs\r\nNAME: validation\r\nLAST DEPLOYED: Sat Jan 22 22:43:19 2022\r\nNAMESPACE: test\r\nSTATUS: deployed\r\nREVISION: 2\r\nTEST SUITE: test-secrets-store\r\nLast Started: Sat Jan 22 22:43:25 2022\r\nLast Completed: Sat Jan 22 22:43:27 2022\r\nPhase: Succeeded\r\n\r\nError: unable to get pod logs for test-secrets-store: pods \"test-secrets-store\" not found\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/eclipse-tractusx/tractusx-edc/pull/480. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "## WHAT\n\nAdding [Hook deletion policies](https://helm.sh/docs/topics/charts_hooks/#hook-deletion-policies) for the Helm tests.\n\n## WHY\n\nWhen running tests with Helm the pods will not be tidied up.\nWhen the annotation `\"helm.sh/hook-delete-policy\": hook-succeeded` is added, after a successful run the pod will be deleted afterwards.\nIf the pod fails it will still be present.\n\nAlso the value `before-hook-creation` was added to be able to run the test again if it failed without manually deleting the pod.\n\n
\n\nMarco Lecheler [marco.lecheler@mercedes-benz.com](mailto:marco.lecheler@mercedes-benz.com) Mercedes-Benz Tech Innovation GmbH ([ProviderInformation](https://github.com/mercedes-benz/foss/blob/master/PROVIDER_INFORMATION.md))", + "codeSnippets": [ + "$ helm --namespace test validation --logs\r\nNAME: validation\r\nLAST DEPLOYED: Sat Jan 22 22:43:19 2022\r\nNAMESPACE: test\r\nSTATUS: deployed\r\nREVISION: 2\r\nTEST SUITE: test-secrets-store\r\nLast Started: Sat Jan 22 22:43:25 2022\r\nLast Completed: Sat Jan 22 22:43:27 2022\r\nPhase: Succeeded\r\n\r\nError: unable to get pod logs for test-secrets-store: pods \"test-secrets-store\" not found", + "$ helm --namespace test validation --logs\r\nNAME: validation\r\nLAST DEPLOYED: Sat Jan 22 22:43:19 2022\r\nNAMESPACE: test\r\nSTATUS: deployed\r\nREVISION: 2\r\nTEST SUITE: test-secrets-store\r\nLast Started: Sat Jan 22 22:43:25 2022\r\nLast Completed: Sat Jan 22 22:43:27 2022\r\nPhase: Succeeded\r\n\r\nError: unable to get pod logs for test-secrets-store: pods \"test-secrets-store\" not found" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "troubleshoot", + "size-m", + "needs-rebase", + "bug" + ], + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [ + "Pod", + "Secret", + "Namespace" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/helm/helm/pull/10603", + "repo": "https://github.com/helm/helm", + "pr": "https://github.com/eclipse-tractusx/tractusx-edc/pull/480" + }, + "reactions": 25, + "comments": 14, + "synthesizedBy": "regex", + "qualityScore": 70 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with helm installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:26:32.815Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-11760-fail-strict-lints-when-unused-values-are-provided.json b/solutions/cncf-generated/helm/helm-11760-fail-strict-lints-when-unused-values-are-provided.json new file mode 100644 index 00000000..449cda11 --- /dev/null +++ b/solutions/cncf-generated/helm/helm-11760-fail-strict-lints-when-unused-values-are-provided.json @@ -0,0 +1,80 @@ +{ + "version": "kc-mission-v1", + "name": "helm-11760-fail-strict-lints-when-unused-values-are-provided", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "helm: Fail strict lints when unused values are provided", + "description": "**What this PR does / why we need it**:\n\nWhen an unused value is passed into the chart, a strict lint will throw an error. For example, given the chart, a `helm lint --strict` will fail.\n\n```\n# templates/configmap.yaml\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: example-chart\n namespace: example-chart\n labels:\n team: infrastructure\n chart_name: {{ $.Chart.Name }}\n chart_version: {{ $.Chart.Version }}\n annotations:\n deleteme: \"if found\"\n```\n\n```\n# values.yaml\nsuper:\n unused: ", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "**What this PR does / why we need it**:\n\nWhen an unused value is passed into the chart, a strict lint will throw an error. 
For example, given the chart, a `helm lint --strict` will fail.\n\n```\n# templa" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n# templates/configmap.yaml\r\nkind: ConfigMap\r\napiVersion: v1\r\nmetadata:\r\n name: example-chart\r\n namespace: example-chart\r\n labels:\r\n team: infrastructure\r\n chart_name: {{ $.Chart.Name }}\r\n chart_version: {{ $.Chart.Version }}\r\n annotations:\r\n deleteme: \"if found\"\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "@drewgonzales360 the solution you built for your company, is that something that is shareable or is it a very custom fit solution?\n\nFor example i did a verrry rough POC in a bash script that pulls a helms values.yaml tturns them into the `x.y.z` key format and diffs against the values.yaml that we provide in the same format\n\nIt works... but there are assumptions around the remote values.yaml being correct and all object keys being implemented which is almost always never fully filled out.\n\nWas just curious if your approach was any cleaner/smarter/robust? \n\n(for anyone curious)\ni.e\n\n```\n#!/bin/bash\nfile1=$(mktemp)\nfile2=$(mktemp)\nkeys1=$(mktemp)\nkeys2=$(mktemp)\n\nwget https://raw.githubusercontent.com/argoproj/argo-helm/main/charts/argo-rollouts/values.yaml -O \"$file1\"\ncat ./values.yaml >\"$file2\"\n\ngrep -v -E '^[[:space:]]*(-|#)' \"$file1\" | grep -v '^$' >\"$file1.tmp\" && mv \"$file1.tmp\" \"$file1\"\ngrep -v -E '^[[:space:]]*(-|#)' \"$file2\" | grep -v '^$' >\"$file2.tmp\" && mv \"$file2.tmp\" \"$file2\"\n\n# Recursive function to print full paths of all keys\nfunction traverse() {\n local file=$1\n local path=$2\n echo \"$path\" | tr -d \"-\" | sed 's/\\.$//'\n local keys=$(yq e \".${path} | keys\" \"$file\" 2>/dev/null)\n for key in $keys; do\n traverse \"$file\" \"${path}.${key}\"\n done\n}\n\n# Generate lists of keys for both files\n\nkeys=$(yq e 'keys' \"$file1\")\nfor key in $keys; do\n traverse \"$file1\" \"$key\" >>\"$keys1\"\ndone\ncat \"$keys1\" | sort | uniq >\"$keys1.tmp\" && mv \"$keys1.tmp\" \"$k", + "codeSnippets": [ + "# templates/configmap.yaml\r\nkind: ConfigMap\r\napiVersion: v1\r\nmetadata:\r\n name: example-chart\r\n namespace: example-chart\r\n labels:\r\n team: infrastructure\r\n chart_name: {{ $.Chart.Name }}\r\n chart_version: {{ $.Chart.Version }}\r\n annotations:\r\n deleteme: \"if found\"", + "# templates/configmap.yaml\r\nkind: ConfigMap\r\napiVersion: v1\r\nmetadata:\r\n name: example-chart\r\n namespace: example-chart\r\n labels:\r\n team: infrastructure\r\n chart_name: {{ $.Chart.Name }}\r\n chart_version: {{ $.Chart.Version }}\r\n annotations:\r\n deleteme: \"if found\"", + "# values.yaml\r\nsuper:\r\n unused: REQUIRED\r\n value:\r\n that: REQUIRED\r\n is_not: REQUIRED\r\n used:\r\n ever: REQUIRED", + "❯ /usr/local/bin/helm lint --strict example-chart\r\n==> Linting example-chart\r\n[INFO] Chart.yaml: icon is recommended\r\n[ERROR] templates/: there are unused fields in values files [.Values.super.unused .Values.super.used.ever .Values.super.value.is_not .Values.super.value.that]\r\n\r\nError: 1 chart(s) linted, 1 chart(s) failed", + "usedValues = sets.Set[string]()\r\nprovidedValues = sets.Set[string]()\r\nfor every file in the chart\r\n usedValues.insert( all values referenced in the file )\r\n providedValues.insert( all values provided 
)\r\nend\r\n\r\nunusedValues = setDifference(usedValues, providedValues)\r\nreturn unusedValues" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "troubleshoot", + "size-l" + ], + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [ + "Deployment", + "Configmap", + "Namespace" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/helm/helm/pull/11760", + "repo": "https://github.com/helm/helm" + }, + "reactions": 24, + "comments": 29, + "synthesizedBy": "regex", + "qualityScore": 70 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with helm installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:26:33.826Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-12879-bugfix-override-subcharts-with-null-values.json b/solutions/cncf-generated/helm/helm-12879-bugfix-override-subcharts-with-null-values.json new file mode 100644 index 00000000..edbdc026 --- /dev/null +++ b/solutions/cncf-generated/helm/helm-12879-bugfix-override-subcharts-with-null-values.json @@ -0,0 +1,79 @@ +{ + "version": "kc-mission-v1", + "name": "helm-12879-bugfix-override-subcharts-with-null-values", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "helm: bugfix: Override subcharts with null values", + "description": "This PR closes #12469 and closes #12488\n\nHelm should allow users to not only override default values, but also completely remove any default values by setting a config to `null`. \n\nThis works fine for regular charts, but default values within sub-charts cannot be `null`-ed. The linked issue has a good example of this created by user \"naemono.\" \n\nThe reason this issue is happening is because the `coalesce` function goes over sub-chart values that are defined in a values file or with a `--set` f", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This PR closes #12469 and closes #12488\n\nHelm should allow users to not only override default values, but also completely remove any default values by setting a config to `null`. \n\nThis works fine fo" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nworker:\r\n app:\r\n resources:\r\n limits:\r\n cpu: null\r\n memory: 1500Mi\r\n requests:\r\n cpu: 50m\r\n memory: 100Mi\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/frain-dev/helm-charts/pull/23. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Hello !\n\nCPU limits shouldn't be set by default on subcharts and users should be able to configure the pods without them.\n\nHelm isn't able to nullify subcharts by-default values: https://github.com/helm/helm/pull/12879\n\nThis modification simply passes the default configuration of CPU and memory limits from the sub-charts to the main chart. 
I've kept the basic values as a recommendation, but the values are now nullifiable in a custom values.yaml:\n\n```yaml\nworker:\n app:\n resources:\n limits:\n cpu: null\n memory: 1500Mi\n requests:\n cpu: 50m\n memory: 100Mi\n```", + "codeSnippets": [ + "worker:\r\n app:\r\n resources:\r\n limits:\r\n cpu: null\r\n memory: 1500Mi\r\n requests:\r\n cpu: 50m\r\n memory: 100Mi", + "worker:\r\n app:\r\n resources:\r\n limits:\r\n cpu: null\r\n memory: 1500Mi\r\n requests:\r\n cpu: 50m\r\n memory: 100Mi" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "troubleshoot", + "size-m", + "bug" + ], + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/helm/helm/pull/12879", + "repo": "https://github.com/helm/helm", + "pr": "https://github.com/frain-dev/helm-charts/pull/23" + }, + "reactions": 119, + "comments": 65, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with helm installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:26:03.219Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-5492-feat-helm-add-app-version-flag-to-helm-install-upgrade.json b/solutions/cncf-generated/helm/helm-5492-feat-helm-add-app-version-flag-to-helm-install-upgrade.json new file mode 100644 index 00000000..e96736b1 --- /dev/null +++ b/solutions/cncf-generated/helm/helm-5492-feat-helm-add-app-version-flag-to-helm-install-upgrade.json @@ -0,0 +1,81 @@ +{ + "version": "kc-mission-v1", + "name": "helm-5492-feat-helm-add-app-version-flag-to-helm-install-upgrade", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "helm: feat(helm): add --app-version flag to 'helm install/upgrade'", + "description": "New pull request for https://github.com/helm/helm/pull/4961, sorry about that\n\n**If applicable**:\n- [X] this PR contains documentation", + "type": "upgrade", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "New pull request for https://github.com/helm/helm/pull/4961, sorry about that\n\n**If applicable**:\n- [X] this PR contains documentation" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n#!/bin/bash -eo pipefail\r\n.circleci/test.sh\r\nRunning 'make build'\r\nGOBIN=/go/src/k8s.io/helm/bin go install -tags '' -ldflags '-w -s -X k8s.io/helm/pkg/version.GitCommit=b5f34b33bb0a61800ed83066f9654c5b5fa29ed0 -X k8s.io/helm/pkg/version.GitTreeState=clean' k8s.io/helm/cmd/...\r\nRunning 'make verify-docs'\r\nGOBIN=/go/src/k8s.io/helm/bin go install -tags '' -ldflags '-w -s -X k8s.io/helm/pkg/version.GitCommit=b5f34b33bb0a61800ed83066f9654c5b5fa29ed0 -X k8s.io/helm/pkg/version.GitTreeState=clean' \n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/helm/helm/pull/4961. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "When 'helm install/upgrade --app-version 1.0.0' is run, this will override the chart app version\n\nWaiting for your feedback, not 100% sure is the most efficient way do to that", + "codeSnippets": [ + "#!/bin/bash -eo pipefail\r\n.circleci/test.sh\r\nRunning 'make build'\r\nGOBIN=/go/src/k8s.io/helm/bin go install -tags '' -ldflags '-w -s -X k8s.io/helm/pkg/version.GitCommit=b5f34b33bb0a61800ed83066f9654c5b5fa29ed0 -X k8s.io/helm/pkg/version.GitTreeState=clean' k8s.io/helm/cmd/...\r\nRunning 'make verify-docs'\r\nGOBIN=/go/src/k8s.io/helm/bin go install -tags '' -ldflags '-w -s -X k8s.io/helm/pkg/version.GitCommit=b5f34b33bb0a61800ed83066f9654c5b5fa29ed0 -X k8s.io/helm/pkg/version.GitTreeState=clean'", + "#!/bin/bash -eo pipefail\r\n.circleci/test.sh\r\nRunning 'make build'\r\nGOBIN=/go/src/k8s.io/helm/bin go install -tags '' -ldflags '-w -s -X k8s.io/helm/pkg/version.GitCommit=b5f34b33bb0a61800ed83066f9654c5b5fa29ed0 -X k8s.io/helm/pkg/version.GitTreeState=clean' k8s.io/helm/cmd/...\r\nRunning 'make verify-docs'\r\nGOBIN=/go/src/k8s.io/helm/bin go install -tags '' -ldflags '-w -s -X k8s.io/helm/pkg/version.GitCommit=b5f34b33bb0a61800ed83066f9654c5b5fa29ed0 -X k8s.io/helm/pkg/version.GitTreeState=clean' k8s.io/helm/cmd/...\r\nCreating /root/.helm \r\nCreating /root/.helm/repository \r\nCreating /root/.helm/repository/cache \r\nCreating /root/.helm/repository/local \r\nCreating /root/.helm/plugins \r\nCreating /root/.helm/starters \r\nCreating /root/.helm/cache/archive \r\nCreating /root/.helm/repository/repositories.yaml \r\nAdding stable repo with URL: https://kubernetes-charts.storage.googleapis.com \r\nAdding local repo with URL: http://127.0.0.1:8879/charts \r\n$HELM_HOME has been configured at /root/.helm.\r\nNot installing Tiller due to 'client-only' flag having been set\r\n--- /tmp/tmp.xDUewHmInp/docs/helm/helm_install.md\t2019-06-07 13:13:04.965128551 +0000\r\n+++ docs/helm/helm_install.md\t2019-06-07 13:10:32.990535551 +0000\r\n@@ -83,7 +83,7 @@ helm install [CHART] [flags]\r\n --ca-file string Verify certificates of HTTPS-enabled servers using this CA bundle\r\n --cert-file string Identify HTTPS client using this SSL certificate file\r\n --dep-up Run helm dependency update before installing the chart\r\n- --description string Specify a description for the release\r\n+ --description string specify a description for the release\r\n --devel Use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored.\r\n --dry-run Simulate an install\r\n -h, --help help for install\r\nhelm docs are out of date. 
Please run \"make docs\"\r\nMakefile:150: recipe for target 'verify-docs' failed\r\nmake: *** [verify-docs] Error 1\r\nExited with code 2", + "{{ default .Values.appVersion .Chart.AppVersion }}", + "{{ default .Values.appVersion .Chart.AppVersion }}" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "upgrade", + "in-progress", + "size-l" + ], + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "issueTypes": [ + "upgrade" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/helm/helm/pull/5492", + "repo": "https://github.com/helm/helm", + "pr": "https://github.com/helm/helm/pull/4961" + }, + "reactions": 43, + "comments": 25, + "synthesizedBy": "regex", + "qualityScore": 66 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with helm installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:26:13.596Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-7431-feat-helm-add-recreate-upgrade-rollback-strategy.json b/solutions/cncf-generated/helm/helm-7431-feat-helm-add-recreate-upgrade-rollback-strategy.json new file mode 100644 index 00000000..7604c6ee --- /dev/null +++ b/solutions/cncf-generated/helm/helm-7431-feat-helm-add-recreate-upgrade-rollback-strategy.json @@ -0,0 +1,87 @@ +{ + "version": "kc-mission-v1", + "name": "helm-7431-feat-helm-add-recreate-upgrade-rollback-strategy", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "helm: feat(helm): add recreate upgrade (rollback) strategy", + "description": "An additional optional flag `--recreate` can be passed on upgrade (or rollback) of a release.\nIn combination with the `--force` flag the following strategies are employed when updating a resource (which can be directly compared to kubectl):\n\n```\nhelm kubectl action on 'invalid' or 'conflict'\n--------------------------------------------------------------------------------------------------------------\n\nupgrade apply ", + "type": "upgrade", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "An additional optional flag `--recreate` can be passed on upgrade (or rollback) of a release.\nIn combination with the `--force` flag the following strategies are employed when updating a resource (whi" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nhelm kubectl action on 'invalid' or 'conflict'\r\n--------------------------------------------------------------------------------------------------------------\r\n\r\nupgrade apply PATCH fail\r\n\r\nupgrade --force replace PUT fail\r\n\r\nupgrade --recreate apply --force PATCH DELETE -> GET (poll) -> POST\r\n\r\nupgrad\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/helm/helm/pull/7653. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "This PR changes the behavior of \"helm upgrade\" command when there have been no successful and at least one failed release. 
This allows the user to recover from a failed install (or a partially failed install) without uninstalling the release. The user can now decide to decide to ignore the \"no deployed releases\" errors and allow it to upgrade a failed release.\n\nThere were no test cases that tested the failure scenarios of upgrading a failed/missing release so I added them and also added additional test cases that test the new functionality.\n\nFixes issue #5595", + "codeSnippets": [ + "helm kubectl action on 'invalid' or 'conflict'\r\n--------------------------------------------------------------------------------------------------------------\r\n\r\nupgrade apply PATCH fail\r\n\r\nupgrade --force replace PUT fail\r\n\r\nupgrade --recreate apply --force PATCH DELETE -> GET (poll) -> POST\r\n\r\nupgrad", + "helm kubectl action on 'invalid' or 'conflict'\r\n--------------------------------------------------------------------------------------------------------------\r\n\r\nupgrade apply PATCH fail\r\n\r\nupgrade --force replace PUT fail\r\n\r\nupgrade --recreate apply --force PATCH DELETE -> GET (poll) -> POST\r\n\r\nupgrade --recreate --force replace --force DELETE -> GET (poll) -> POST fail", + "Update(original, target ResourceList, force bool) (*Result, error)", + "Update(original, target ResourceList, options UpdateOptions) (*Result, error)" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "upgrade", + "feature", + "size-xl", + "v3-x" + ], + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [ + "Service", + "Storageclass", + "Role", + "Rolebinding" + ], + "difficulty": "advanced", + "issueTypes": [ + "upgrade" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/helm/helm/pull/7431", + "repo": "https://github.com/helm/helm", + "pr": "https://github.com/helm/helm/pull/7653" + }, + "reactions": 52, + "comments": 82, + "synthesizedBy": "regex", + "qualityScore": 66 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with helm installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:26:09.462Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-7649-adopt-resources-into-release-with-correct-instance-and-managed-by-labe.json b/solutions/cncf-generated/helm/helm-7649-adopt-resources-into-release-with-correct-instance-and-managed-by-labe.json new file mode 100644 index 00000000..6d7ebf5d --- /dev/null +++ b/solutions/cncf-generated/helm/helm-7649-adopt-resources-into-release-with-correct-instance-and-managed-by-labe.json @@ -0,0 +1,84 @@ +{ + "version": "kc-mission-v1", + "name": "helm-7649-adopt-resources-into-release-with-correct-instance-and-managed-by-labe", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "helm: Adopt resources into release with correct instance and managed-by labels", + "description": "Alternative implementation to https://github.com/helm/helm/pull/7627, https://github.com/helm/helm/pull/7625, and https://github.com/helm/helm/pull/7575.\n\nHelp with validation would be appreciated, but in theory this closes https://github.com/helm/helm/issues/6850, closes https://github.com/helm/helm/issues/4824, closes https://github.com/helm/helm/issues/2947, and closes https://github.com/helm/helm/issues/7418. 
This implementation would also help make https://github.com/helm/helm/issues/2730 v", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Alternative implementation to https://github.com/helm/helm/pull/7627, https://github.com/helm/helm/pull/7625, and https://github.com/helm/helm/pull/7575.\n\nHelp with validation would be appreciated, bu" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nKIND=deployment\r\nNAME=my-app-staging\r\nRELEASE=staging\r\nNAMESPACE=default\r\nkubectl annotate $KIND $NAME meta.helm.sh/release-name=$RELEASE\r\nkubectl annotate $KIND $NAME meta.helm.sh/release-namespace=$NAMESPACE\r\nkubectl label $KIND $NAME app.kubernetes.io/managed-by=Helm\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/helm/helm/pull/7625. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Related to https://github.com/helm/helm/issues/6850, and an alternative implementation to https://github.com/helm/helm/pull/7575.\n\n### Steps to Reproduce\n\nCreate a chart with\n\n```yaml\napiVersion: apps/v1beta2\nkind: Deployment\n```\n\nDeploy a release of that chart, then switch to apps/v1:\n\n```diff\n- apiVersion: apps/v1beta2\n+ apiVersion: apps/v1\nkind: Deployment\n```\n\nRun `upgrade`, and you should get the following error before this patch:\n\n```\nrendered manifests contain a new resource that already exists. Unable to continue with update: existing resource conflict: namespace: default, name: version-migrate-test, existing_kind: apps/v1, Kind=Deployment, new_kind: apps/v1, Kind=Deployment\n```", + "codeSnippets": [ + "KIND=deployment\r\nNAME=my-app-staging\r\nRELEASE=staging\r\nNAMESPACE=default\r\nkubectl annotate $KIND $NAME meta.helm.sh/release-name=$RELEASE\r\nkubectl annotate $KIND $NAME meta.helm.sh/release-namespace=$NAMESPACE\r\nkubectl label $KIND $NAME app.kubernetes.io/managed-by=Helm", + "KIND=deployment\r\nNAME=my-app-staging\r\nRELEASE=staging\r\nNAMESPACE=default\r\nkubectl annotate $KIND $NAME meta.helm.sh/release-name=$RELEASE\r\nkubectl annotate $KIND $NAME meta.helm.sh/release-namespace=$NAMESPACE\r\nkubectl label $KIND $NAME app.kubernetes.io/managed-by=Helm", + "apiVersion: apps/v1beta2\r\nkind: Deployment", + "Run `upgrade`, and you should get the following error before this patch:", + "See also discussion on labels in https://github.com/helm/helm/issues/6365#issuecomment-547151315\nthanks @jlegrone. I'm going to give this a spin today. Looks like there are some significant behavioural changes under the hood to `helm install` with the change to call `KubeClient.Update`, so I want to make sure we're not introducing any regressions here.\nis this still a work-in-progress, or can we remove that label?\n> is this still a work-in-progress, or can we remove that label?\r\n\r\n@bacongobbler yes, see TODOs in the description. I think the main outstanding issue is to validate 3-way diff behavior, which is why I've kept the wip label for now.\nA basic test looks promising!\r\n\r\nI created two pods: `foo` and `bar`. 
One had the annotations, the other didn't.\r\n\r\nHere's what I used for testing: [testchart.zip](https://github.com/helm/helm/files/4294581/testchart.zip)\r\n\r\nTo create the pods, I used:" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "troubleshoot", + "size-l" + ], + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [ + "Deployment", + "Namespace" + ], + "difficulty": "advanced", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/helm/helm/pull/7649", + "repo": "https://github.com/helm/helm", + "pr": "https://github.com/helm/helm/pull/7625" + }, + "reactions": 127, + "comments": 56, + "synthesizedBy": "regex", + "qualityScore": 70 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with helm installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:26:00.808Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-8841-add-include-file-flag-for-external-files.json b/solutions/cncf-generated/helm/helm-8841-add-include-file-flag-for-external-files.json new file mode 100644 index 00000000..57e6346d --- /dev/null +++ b/solutions/cncf-generated/helm/helm-8841-add-include-file-flag-for-external-files.json @@ -0,0 +1,81 @@ +{ + "version": "kc-mission-v1", + "name": "helm-8841-add-include-file-flag-for-external-files", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "helm: Add include-file flag for external files ", + "description": "This PR implements the `--include-file` and `--include-dir` flags for the install, upgrade and template commands.\n\nThese flags load local files and make them available to the chart, so they can be used in templates with functions like `.Files.Get` or `.Files.Glob`.\n\nThis is my first PR here, and I would appreciate comments regarding style, code split (what goes where), and the amount and quality of tests that we need.\n\nThank you!\n\n**Parsing**\n\nBoth flags are arrays and can have multiple values, ", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This PR implements the `--include-file` and `--include-dir` flags for the install, upgrade and template commands.\n\nThese flags load local files and make them available to the chart, so they can be use" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nhelm template test . --include-file my_license.conf=../license.conf,foo=foo.txt --include-file bar=bar.txt --set license=my_license.conf\r\n\r\n# in chart/templates/cmap.yml\r\n{{ (.Files.Glob .Values.license).AsConfig | indent 2 }}\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/helm/helm/pull/8227. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "This PR implements the `--include-file` and `--include-dir` flags for the install, upgrade and template commands. 
\n\nThese flags load local files and make them available to the chart, so they can be used in templates with functions like `.Files.Get` or `.Files.Glob`.\n\nThis is my first PR here, and I would appreciate comments regarding style, code split (what goes where), and the amount and quality of tests that we need. \n\nThank you!\n\n**Parsing**\n\nBoth flags are arrays and can have multiple values, either comma-separated, or by writing the flag multiple times. \n\nBoth flags require a key and a path. They will get parsed just like `--set*`, and later values will overwrite previous ones. The paths are then added as if they are part of the chart; the keys represent the file names inside the chart.\n\nYou can use both flags in the same command. Files are parsed first, then directories.\n\n**Single files**\n\n```\nhelm template test . --include-file my_license.conf=../license.conf,foo=foo.txt --include-file bar=bar.txt --set license=my_license.conf\n\n# in chart/templates/cmap.yml\n{{ (.Files.Glob .Values.license).AsConfig | indent 2 }}\n```\n\n**Dirs**\n\nThe include-dir flag can include all the files from a local directory. It will not recurse in subdirectories.\n\n```\nhelm template test . --include-dir certs=../certs\n\n# in chart/templates/cmap.yml\n{{ (.Files.Glob \"certs/\").AsConfig | indent 2 }}\n```\n\n**Globs**\n\nThe `--include-dir` flag optionally supports a glob. It can recurse, but it will take e", + "codeSnippets": [ + "helm template test . --include-file my_license.conf=../license.conf,foo=foo.txt --include-file bar=bar.txt --set license=my_license.conf\r\n\r\n# in chart/templates/cmap.yml\r\n{{ (.Files.Glob .Values.license).AsConfig | indent 2 }}", + "helm template test . --include-file my_license.conf=../license.conf,foo=foo.txt --include-file bar=bar.txt --set license=my_license.conf\r\n\r\n# in chart/templates/cmap.yml\r\n{{ (.Files.Glob .Values.license).AsConfig | indent 2 }}", + "helm template test . --include-dir certs=../certs\r\n\r\n# in chart/templates/cmap.yml\r\n{{ (.Files.Glob \"certs/\").AsConfig | indent 2 }}", + "helm template test . --include-dir conf=../prod/conf/*.conf\r\n\r\n# in chart/templates/cmap.yml\r\n{{ (.Files.Glob \"conf/\").AsConfig | indent 2 }}", + "helm template test . --include-file my_license.conf=../license.conf,foo=foo.txt --include-file bar=bar.txt --set license=my_license.conf\r\n\r\n# in chart/templates/cmap.yml\r\n{{ (.Files.Glob .Values.license).AsConfig | indent 2 }}" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "troubleshoot", + "size-xl" + ], + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/helm/helm/pull/8841", + "repo": "https://github.com/helm/helm", + "pr": "https://github.com/helm/helm/pull/8227" + }, + "reactions": 106, + "comments": 51, + "synthesizedBy": "regex", + "qualityScore": 64 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with helm installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:26:05.466Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-9426-feat-helm-add-ability-for-dry-run-to-do-lookup-functions.json b/solutions/cncf-generated/helm/helm-9426-feat-helm-add-ability-for-dry-run-to-do-lookup-functions.json new file mode 100644 index 00000000..5b126853 --- /dev/null +++ b/solutions/cncf-generated/helm/helm-9426-feat-helm-add-ability-for-dry-run-to-do-lookup-functions.json @@ -0,0 +1,74 @@ +{ + "version": "kc-mission-v1", + "name": "helm-9426-feat-helm-add-ability-for-dry-run-to-do-lookup-functions", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "helm: feat(helm): add ability for --dry-run to do lookup functions", + "description": "When a helm command is run with the --dry-run flag, it will try to connect to the cluster\nto be able to render lookup functions.\n\n**What this PR does / why we need it**: It is hard to debug the lookup function and currently there is not a good way to test it with any flags. #8137 Stated that the `--dry-run` was fair game to try to implement this logic as long as the `helm template` logic stays the same.\n\n**Special notes for your reviewer**: This is my first PR for the Helm; please let me know if", + "type": "deploy", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "When a helm command is run with the --dry-run flag, it will try to connect to the cluster\nto be able to render lookup functions.\n\n**What this PR does / why we need it**: It is hard to debug the lookup" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n--dry-run='none':\r\n\tMust be \"none\", \"server\", or \"client\". If client strategy, only print the object that\r\n\twould be sent, without sending it. If server strategy, submit server-side request without\r\n\tpersisting the resource.\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "I second @joejulian said about `--dry-run='none'|'client'|'server'`. The PR here (as it stands) is a change in behavior. \n\nCurrently `helm install --dry-run=true` does not contact the cluster, and to the point of the security advisory/model ([GHSA-q8q8-93cv-v6h8](https://github.com/helm/helm/security/advisories/GHSA-q8q8-93cv-v6h8)), I think would be a breaking change for some users (ie. they could suddenly get secrets printed and logged in a CI system; see e.g. https://github.com/helm/helm/issues/7275)\n\nI put a proposal on the main issue (rather than this implementation PR [here](https://github.com/helm/helm/issues/8137#issuecomment-1365384675)", + "codeSnippets": [ + "--dry-run='none':\r\n\tMust be \"none\", \"server\", or \"client\". If client strategy, only print the object that\r\n\twould be sent, without sending it. If server strategy, submit server-side request without\r\n\tpersisting the resource.", + "--dry-run='none':\r\n\tMust be \"none\", \"server\", or \"client\". If client strategy, only print the object that\r\n\twould be sent, without sending it. If server strategy, submit server-side request without\r\n\tpersisting the resource.", + "W0919 10:06:16.181516 1786127 helpers.go:639] --dry-run is deprecated and can be replaced with --dry-run=client." 
+ ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "deploy", + "size-m" + ], + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "deploy" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/helm/helm/pull/9426", + "repo": "https://github.com/helm/helm" + }, + "reactions": 95, + "comments": 51, + "synthesizedBy": "regex", + "qualityScore": 60 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with helm installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:26:06.842Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-9534-allow-configuring-install-order-of-custom-resources.json b/solutions/cncf-generated/helm/helm-9534-allow-configuring-install-order-of-custom-resources.json new file mode 100644 index 00000000..231dde3f --- /dev/null +++ b/solutions/cncf-generated/helm/helm-9534-allow-configuring-install-order-of-custom-resources.json @@ -0,0 +1,86 @@ +{ + "version": "kc-mission-v1", + "name": "helm-9534-allow-configuring-install-order-of-custom-resources", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "helm: Allow configuring install order of Custom Resources", + "description": "**What this PR does / why we need it**:\n\nBy default Helm installs Kubernetes Kinds that doesn't exist in the internal [kube_sorter.go](https://github.com/helm/helm/blob/d55c53df4e394fb62b0514a09c57bce235dd7877/pkg/releaseutil/kind_sorter.go#L31-L66) at the very end of a helm installation. This causes some serious issues with Custom Resources that need to be deployed before specific other resources.\n\nThis PR introduces a new Annotation called `helm.sh/install-before`. With this annotation user ca", + "type": "deploy", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "**What this PR does / why we need it**:\n\nBy default Helm installs Kubernetes Kinds that doesn't exist in the internal [kube_sorter.go](https://github.com/helm/helm/blob/d55c53df4e394fb62b0514a09c57bce" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n# Source: generic-service/templates/securitygrouppolicy.yaml\r\napiVersion: vpcresources.k8s.aws/v1beta1\r\nkind: SecurityGroupPolicy\r\nmetadata:\r\n name: niklas-debug\r\n labels:\r\n helm.sh/chart: generic-service-0.5.0\r\n app.kubernetes.io/name: niklas-debug\r\n app.kubernetes.io/instance: niklas-debug\r\n app.kubernetes.io/version: \"0.1.0\"\r\n app.kubernetes.io/managed-by: Helm\r\n annotations:\r\n helm.sh/install-before: \"Deployment,Statefulset,DaemonSet\"\r\nspec:\r\n podSelector: \r\n matchLa\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/helm/community/pull/230. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "This HIP will cover the option to customize the install order of custom resources. 
\n\nhttps://github.com/helm/helm/pull/9534", + "codeSnippets": [ + "# Source: generic-service/templates/securitygrouppolicy.yaml\r\napiVersion: vpcresources.k8s.aws/v1beta1\r\nkind: SecurityGroupPolicy\r\nmetadata:\r\n name: niklas-debug\r\n labels:\r\n helm.sh/chart: generic-service-0.5.0\r\n app.kubernetes.io/name: niklas-debug\r\n app.kubernetes.io/instance: niklas-debug\r\n app.kubernetes.io/version: \"0.1.0\"\r\n app.kubernetes.io/managed-by: Helm\r\n annotations:\r\n helm.sh/install-before: \"Deployment,Statefulset,DaemonSet\"\r\nspec:\r\n podSelector: \r\n matchLa", + "# Source: generic-service/templates/securitygrouppolicy.yaml\r\napiVersion: vpcresources.k8s.aws/v1beta1\r\nkind: SecurityGroupPolicy\r\nmetadata:\r\n name: niklas-debug\r\n labels:\r\n helm.sh/chart: generic-service-0.5.0\r\n app.kubernetes.io/name: niklas-debug\r\n app.kubernetes.io/instance: niklas-debug\r\n app.kubernetes.io/version: \"0.1.0\"\r\n app.kubernetes.io/managed-by: Helm\r\n annotations:\r\n helm.sh/install-before: \"Deployment,Statefulset,DaemonSet\"\r\nspec:\r\n podSelector: \r\n matchLabels:\r\n app.kubernetes.io/name: niklas-debug\r\n app.kubernetes.io/instance: niklas-debug\r\n securityGroups:\r\n groupIds:\r\n - sg-xyz" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "deploy", + "size-l", + "needs-rebase", + "stale" + ], + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [ + "Pod", + "Deployment", + "Service", + "Statefulset", + "Daemonset" + ], + "difficulty": "beginner", + "issueTypes": [ + "deploy" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/helm/helm/pull/9534", + "repo": "https://github.com/helm/helm", + "pr": "https://github.com/helm/community/pull/230" + }, + "reactions": 28, + "comments": 7, + "synthesizedBy": "regex", + "qualityScore": 62 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with helm installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:26:22.526Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/helm/helm-9782-implement-changes-proposed-in-hip-6-oci-support.json b/solutions/cncf-generated/helm/helm-9782-implement-changes-proposed-in-hip-6-oci-support.json new file mode 100644 index 00000000..50562196 --- /dev/null +++ b/solutions/cncf-generated/helm/helm-9782-implement-changes-proposed-in-hip-6-oci-support.json @@ -0,0 +1,77 @@ +{ + "version": "kc-mission-v1", + "name": "helm-9782-implement-changes-proposed-in-hip-6-oci-support", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "helm: Implement changes proposed in HIP 6: OCI Support", + "description": "For more information, please see the following URL: https://github.com/helm/community/blob/main/hips/hip-0006.md\n\nNote: OCI support remains experimental, and you are still required to set HELM_EXPERIMENTAL_OCI=1 in your environment.\n\n**What this PR does / why we need it**:\n\nThis PR implements the changes outlined in HIP 6. 
This will provide a more stable foundation for Helm's OCI registry support.\n\nThis adds a new `helm push` subcommand and `Pusher` interface for uploading charts, which can be e", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "For more information, please see the following URL: https://github.com/helm/community/blob/main/hips/hip-0006.md\n\nNote: OCI support remains experimental, and you are still required to set HELM_EXPERIM" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nhelm install oci://example.com/charts/nginx --version 0.1.0\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Running `helm dep update` gives an error but still succeeds\n```\nhelm dep update\nGetting updates for unmanaged Helm repositories...\n...Unable to get an update from the \"oci://localhost:5000/myuser\" chart repository:\n\tobject required\nHang tight while we grab the latest from your chart repositories...\nUpdate Complete. ⎈Happy Helming!⎈\nSaving 1 charts\nDownloading mychart from repo oci://localhost:5000/myuser\nPulled: localhost:5000/myuser/mychart:0.1.0\nDigest: sha256:6cb1fa62c9df1f962093a528b34a0f197e68b46a2656764db62b068cd6c56890\nDeleting outdated charts\n```", + "codeSnippets": [ + "helm install oci://example.com/charts/nginx --version 0.1.0", + "helm install oci://example.com/charts/nginx --version 0.1.0", + "//:", + "? Or just let it go?\n@jdolitsky I'm not ignoring your question. I was hoping a maintainer would chime in. I did raise this at the last helm meeting and they noted it was worth more thought.\nGood point.\r\n\r\nIt's a bit of an interesting situation... We exceedingly marked it as experimental, though it somehow went through code review and ended up in a public package.\r\n\r\n[Our compatibility contract](https://github.com/helm/helm/blob/main/CONTRIBUTING.md#semantic-versioning) states that changes SHOULD remain backward compatible. The keyword SHOULD was carefully considered for cases like this. In this particular case (an experimental flag), I think we can make the exception to remove it entirely. Had the experiment failed and we had to remove the experiment from the Helm project, we would've had to do the same thing anyways. /shrug\r\n\r\nI'd be comfortable with removing it entirely this time. But we should be more strict about putting experimental APIs in `internal` moving forward.\r\n\r\nI'll let other maintainers chime in here and see what they think - I'm sure community members relying on these APIs may have opinions as well, so I'd ask around.\nAs far as [HIP 4](https://github.com/helm/community/pull/145) is concerned:\r\n\r\n> * Experimental features are not required to ensure backward compatibility for their feature set. (They cannot, however, break backward compatibility for other parts of Helm.) Thus, a release new release of an existing experimental feature may break APIs, change its chart representations, or modify its command-line flags as long as it does not break the compatibility of non-experimental features.\r\n\r\nFollowing that guideline, new releases of experimental features may break backwards compatibility.\nLooking at the files added to public package (`/pkg`), they are experimental and can be removed cleanly without affecting the other supported APIs. 
It is unfortunate that they ended up in the public package but it doesn't break backwards compatibility per se. I would have no objection removing them.\nRunning `helm dep update` gives an error but still succeeds" + ] + } + }, + "metadata": { + "tags": [ + "helm", + "graduated", + "app-definition", + "troubleshoot", + "size-xxl" + ], + "cncfProjects": [ + "helm" + ], + "targetResourceKinds": [ + "Namespace" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/helm/helm/pull/9782", + "repo": "https://github.com/helm/helm" + }, + "reactions": 22, + "comments": 40, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with helm installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:26:35.279Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/inspektor-gadget/inspektor-gadget-1396-making-gadgets-self-contained-adding-metadata-operators-in.json b/solutions/cncf-generated/inspektor-gadget/inspektor-gadget-1396-making-gadgets-self-contained-adding-metadata-operators-in.json new file mode 100644 index 00000000..fa8151da --- /dev/null +++ b/solutions/cncf-generated/inspektor-gadget/inspektor-gadget-1396-making-gadgets-self-contained-adding-metadata-operators-in.json @@ -0,0 +1,84 @@ +{ + "version": "kc-mission-v1", + "name": "inspektor-gadget-1396-making-gadgets-self-contained-adding-metadata-operators-in", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "inspektor-gadget: Making gadgets self-contained, adding metadata, operators, interfaces and more: Part 2", + "description": "This is the second PR of this series, the first one was #1281.\n\nThis part focuses on the communication between `kubectl-gadget` and `gadgettracermanager`. Communication has been replaced with gRPC, so that the whole lifecycle of a gadget on each node is now handled by a connection to each node rather than a CR. In most cases this will simplify things, as there are no longer stale traces, response times should be lower and in general the workflow is a lot simpler and less error prone.\n\nCRs will r", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This is the second PR of this series, the first one was #1281.\n\nThis part focuses on the communication between `kubectl-gadget` and `gadgettracermanager`. Communication has been replaced with gRPC, so" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n$ kubectl gadget bpftrace -e 'tracepoint:syscalls:sys_enter_openat { printf(\"%s %s\\n\", comm, str(args->filename)); }'\r\nNODE OUTPUT\r\nminikube-m02 Attaching 1 probe...\r\nminikube-m02 bpftrace /sys/devices/system/cpu/online\r\nminikube-m02 bpftrace /sys/devices/system/cpu/online\r\nminikube-m03 Attaching 1 probe...\r\nminikube-m02 bpftrace /dev/null\r\nminikube-m02 \n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/inspektor-gadget/inspektor-gadget/pull/1370. Review the changes to understand the solution." 
+ }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "This PR implements a basic support for bpftrace. \n\n```bash\n$ kubectl gadget bpftrace -e 'tracepoint:syscalls:sys_enter_openat { printf(\"%s %s\\n\", comm, str(args->filename)); }'\nNODE OUTPUT\nminikube-m02 Attaching 1 probe...\nminikube-m02 bpftrace /sys/devices/system/cpu/online\nminikube-m02 bpftrace /sys/devices/system/cpu/online\nminikube-m03 Attaching 1 probe...\nminikube-m02 bpftrace /dev/null\nminikube-m02 bpftrace /sys/kernel/debug/tracing/events/syscalls/sys_enter_openat/id\nminikube-m02 bpftrace /sys/devices/system/cpu/online\nminikube-m03 bpftrace /sys/devices/system/cpu/online\nminikube-m03 bpftrace /sys/devices/system/cpu/online\nminikube-m02 bpftrace /sys/devices/system/cpu/online\nminikube Attaching 1 probe...\nminikube-m02 bpftrace /dev/null\nminikube-m02 bpftrace /sys/kernel/debug/tracing/events/syscalls/sys_enter_openat/id\nminikube-m02 runc /usr/bin/runc\nminikube-m02 runc /proc/sys/kernel/cap_last_cap\nminikube runc /proc/sys/kernel/cap_last_cap\nminikube-m03 runc /proc/sys/kernel/cap_last_cap\nminikube-m02 runc\n...\n```\n\nTODO: (for future PRs?)", + "codeSnippets": [ + "$ kubectl gadget bpftrace -e 'tracepoint:syscalls:sys_enter_openat { printf(\"%s %s\\n\", comm, str(args->filename)); }'\r\nNODE OUTPUT\r\nminikube-m02 Attaching 1 probe...\r\nminikube-m02 bpftrace /sys/devices/system/cpu/online\r\nminikube-m02 bpftrace /sys/devices/system/cpu/online\r\nminikube-m03 Attaching 1 probe...\r\nminikube-m02 bpftrace /dev/null\r\nminikube-m02", + "$ kubectl gadget bpftrace -e 'tracepoint:syscalls:sys_enter_openat { printf(\"%s %s\\n\", comm, str(args->filename)); }'\r\nNODE OUTPUT\r\nminikube-m02 Attaching 1 probe...\r\nminikube-m02 bpftrace /sys/devices/system/cpu/online\r\nminikube-m02 bpftrace /sys/devices/system/cpu/online\r\nminikube-m03 Attaching 1 probe...\r\nminikube-m02 bpftrace /dev/null\r\nminikube-m02 bpftrace /sys/kernel/debug/tracing/events/syscalls/sys_enter_openat/id\r\nminikube-m02 bpftrace /sys/devices/system/cpu/online\r\nminikube-m03 bpftrace /sys/devices/system/cpu/online\r\nminikube-m03 bpftrace /sys/devices/system/cpu/online\r\nminikube-m02 bpftrace /sys/devices/system/cpu/online\r\nminikube Attaching 1 probe...\r\nminikube-m02 bpftrace /dev/null\r\nminikube-m02 bpftrace /sys/kernel/debug/tracing/events/syscalls/sys_enter_openat/id\r\nminikube-m02 runc /usr/bin/runc\r\nminikube-m02 runc /proc/sys/kernel/cap_last_cap\r\nminikube runc /proc/sys/kernel/cap_last_cap\r\nminikube-m03 runc /proc/sys/kernel/cap_last_cap\r\nminikube-m02 runc\r\n...", + "> \"node1\": {histogram in JSON format}\r\n> \"node2\": {histogram in JSON format}\r\n>", + "> [{socket1 of node1}, {socket2 of node1}]\r\n> [{socket1 of node2}, {socket2 of node2}]\r\n>", + "> [{socket1 of node1}, {socket2 of node1}]\r\n>" + ] + } + }, + "metadata": { + "tags": [ + "inspektor-gadget", + "sandbox", + "observability", + "troubleshoot" + ], + "cncfProjects": [ + "inspektor-gadget" + ], + "targetResourceKinds": [ + "Pod", + "Service", + "Node" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/inspektor-gadget/inspektor-gadget/pull/1396", + "repo": "https://github.com/inspektor-gadget/inspektor-gadget", + "pr": "https://github.com/inspektor-gadget/inspektor-gadget/pull/1370" + }, + "reactions": 2, + 
"comments": 19, + "synthesizedBy": "regex", + "qualityScore": 69 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with inspektor-gadget installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:40:41.998Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/istio/istio-13500-wip-add-support-for-using-jaeger-operator-if-installed-and-enabled-t.json b/solutions/cncf-generated/istio/istio-13500-wip-add-support-for-using-jaeger-operator-if-installed-and-enabled-t.json new file mode 100644 index 00000000..fd60ed31 --- /dev/null +++ b/solutions/cncf-generated/istio/istio-13500-wip-add-support-for-using-jaeger-operator-if-installed-and-enabled-t.json @@ -0,0 +1,82 @@ +{ + "version": "kc-mission-v1", + "name": "istio-13500-wip-add-support-for-using-jaeger-operator-if-installed-and-enabled-t", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "istio: WIP: Add support for using Jaeger operator, if installed and enabled, to p…", + "description": "…rovide production deployments\n\nThis PR supersedes #9508, using a similar approach to #13407, expecting the operator to have been installed previously.\n\nThe operator is enabled using `--set tracing.jaeger.operator.enabled`. By default an in-memory deployment of Jaeger would be used (equivalent to the non-operator approach).\n\nThe Jaeger operator's custom resource is defined under the `tracing.jaeger.spec` node (suggestion for another subnode name welcome).\n\nFor example, a production deployment ca", + "type": "deploy", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "…rovide production deployments\n\nThis PR supersedes #9508, using a similar approach to #13407, expecting the operator to have been installed previously.\n\nThe operator is enabled using `--set tracing.ja" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nhelm template install/kubernetes/helm/istio --name istio --namespace istio-system --set prometheus.operator.createServiceMonitors=true --set prometheus.operator.createPrometheusResource=true | kubectl apply -f -\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/istio/istio/pull/13407. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "This PR introduces install options for generating custom resources for the [Prometheus Operator](https://github.com/coreos/prometheus-operator). These resources mirror, as closely as they can, the existing Istio config for the Prometheus addon. This configuration is meant to provide *alpha* quality support.\n\nThe following install options are provided:\n- `prometheus.operator.createServiceMonitors` will generate a bunch of `ServiceMonitor` resources for Istio endpoints. 
This set includes the `istio-mesh` resources (`istio-telemetry:42422` endpoints), envoy proxy stats (`:15090/stats/prometheus` endpoints), Istio component endpoints (mixer, galley, pilot, citadel), and services/pods with prometheus scrape annotations (both for mTLS-secured services/pods and plain-text).\n- `prometheus.operator.createPrometheusResource` will generate a `Prometheus` resource that will cause the operator to create Prometheus deployment in the `istio-system` namespace.\n\nThese options should only be used in clusters where the Prometheus Operator has already been installed (example: `kubectl -n prometheus-operator apply -f https://raw.githubusercontent.com/coreos/prometheus-operator/master/bundle.yaml`).\n\nExample install using these options:\n\n```bash\nhelm template install/kubernetes/helm/istio --name istio --namespace istio-system --set prometheus.operator.createServiceMonitors=true --set prometheus.operator.createPrometheusResource=true | kubectl apply -f -\n```\n\nWith a cluster with mTLS", + "codeSnippets": [ + "helm template install/kubernetes/helm/istio --name istio --namespace istio-system --set prometheus.operator.createServiceMonitors=true --set prometheus.operator.createPrometheusResource=true | kubectl apply -f -", + "helm template install/kubernetes/helm/istio --name istio --namespace istio-system --set prometheus.operator.createServiceMonitors=true --set prometheus.operator.createPrometheusResource=true | kubectl apply -f -", + "apiVersion: istio.banzaicloud.io/v1beta1\r\nkind: Istio\r\nmetadata:\r\n labels:\r\n controller-tools.k8s.io: \"1.0\"\r\n name: istio-sample\r\nspec:\r\n version: \"1.1.6\"\r\n mtls: false\r\n autoInjectionNamespaces:\r\n - \"default\"\r\n pilot:\r\n enabled: true\r\n...\r\n tracing:\r\n enabled: true\r\n kind: jaeger\r\n cr: my-jaeger-cr" + ] + } + }, + "metadata": { + "tags": [ + "istio", + "graduated", + "networking", + "deploy", + "do-not-merge-work-in-progress" + ], + "cncfProjects": [ + "istio" + ], + "targetResourceKinds": [ + "Deployment", + "Node" + ], + "difficulty": "intermediate", + "issueTypes": [ + "deploy" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/istio/istio/pull/13500", + "repo": "https://github.com/istio/istio", + "pr": "https://github.com/istio/istio/pull/13407" + }, + "reactions": 4, + "comments": 7, + "synthesizedBy": "regex", + "qualityScore": 65 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with istio installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:27:02.797Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/istio/istio-46603-feat-gateway-api-customize-gateway-deployments-via-class-annotations.json b/solutions/cncf-generated/istio/istio-46603-feat-gateway-api-customize-gateway-deployments-via-class-annotations.json new file mode 100644 index 00000000..e5b8da92 --- /dev/null +++ b/solutions/cncf-generated/istio/istio-46603-feat-gateway-api-customize-gateway-deployments-via-class-annotations.json @@ -0,0 +1,87 @@ +{ + "version": "kc-mission-v1", + "name": "istio-46603-feat-gateway-api-customize-gateway-deployments-via-class-annotations", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "istio: feat(gateway-api): customize gateway deployments via class annotations", + "description": "This PR enhances custom `GatewayClass`'es and allows a per-class customization. Currently we have templates and service types hardcoded for each controller type instead.\n\n* Support `inject.istio.io/templates` annotation on `GatewayClass` to pick a custom template from the istiod ConfigMap.\n * Uses controller's default if unset.\n * Supports just a single template for now. No template chaining like for sidecars.\n* Support `networking.istio.io/service-type` annotation on `GatewayClass` to overrid", + "type": "deploy", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This PR enhances custom `GatewayClass`'es and allows a per-class customization. Currently we have templates and service types hardcoded for each controller type instead.\n\n* Support `inject.istio.io/te" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nnetworking.istio.io/service-type: |\r\n ClusterIP\r\n ---\r\n kind: ClusterRole\r\n name: give-everyone-admin-permission\r\n ---\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kubestellar/console-kb/pull/6. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "# CNCF Mission Generation Report (All Batches)\n\n**Date:** 2026-02-27T17:50:38Z\n**Batches:** 11\n\n---\n## Batch: generation-report-0.md\n# CNCF Mission Generation Report\n\n**Date:** 2026-02-27T17:47:59.472Z\n**Total generated:** 184\n**Skipped (duplicates):** 0\n**Errors:** 0\n\n## Projects Processed\n\n| Project | Maturity | Issues Found | Missions Generated | Errors |\n|---------|----------|-------------|-------------------|--------|\n| argo | graduated | 20 | 20 | 0 |\n| cert-manager | graduated | 17 | 17 | 0 |\n| cilium | graduated | 15 | 15 | 0 |\n| cloudevents | graduated | 3 | 3 | 0 |\n| containerd | graduated | 15 | 15 | 0 |\n| coredns | graduated | 6 | 6 | 0 |\n| cri-o | graduated | 14 | 14 | 0 |\n| crossplane | graduated | 8 | 8 | 0 |\n| cubefs | graduated | 0 | 0 | 0 |\n| dapr | graduated | 8 | 8 | 0 |\n| dragonfly | graduated | 0 | 0 | 0 |\n| envoy | graduated | 16 | 16 | 0 |\n| etcd | graduated | 12 | 12 | 0 |\n| falco | graduated | 9 | 9 | 0 |\n| fluentd | graduated | 2 | 2 | 0 |\n| flux | graduated | 0 | 0 | 0 |\n| harbor | graduated | 7 | 7 | 0 |\n| helm | graduated | 20 | 20 | 0 |\n| in-toto | graduated | 3 | 3 | 0 |\n| istio | graduated | 9 | 9 | 0 |\n\n## Generated Missions\n\n- **argo: feat: Multiple sources for applications** (advanced) — [source](https://github.com/argoproj/argo-cd/pull/10432)\n- **argo: feat: Extra Helm values from external git repo #5826** (advanced) — [source](https://github.com/argoproj/argo-cd/pull/6280)\n- **argo: feat(application-controller): Add support for rollback", + "codeSnippets": [ + "networking.istio.io/service-type: |\r\n ClusterIP\r\n ---\r\n kind: ClusterRole\r\n name: give-everyone-admin-permission\r\n ---", + "networking.istio.io/service-type: |\r\n ClusterIP\r\n ---\r\n kind: ClusterRole\r\n name: give-everyone-admin-permission\r\n ---", + "gateway.istio.io/pod-template: |\r\n spec:\r\n nodeSelector: {}\r\n affinity: {}\r\n tolerations: []" + ] + } + }, + "metadata": { + "tags": [ + "istio", + "graduated", + "networking", + "deploy", + "ok-to-test", + "size-l", + "lifecycle-stale", + "lifecycle-automatically-closed", + "release-notes-none" + ], + "cncfProjects": [ + "istio" + ], + "targetResourceKinds": [ + "Deployment", + "Service", + "Configmap" + ], + "difficulty": "beginner", + "issueTypes": [ + "deploy" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/istio/istio/pull/46603", + "repo": "https://github.com/istio/istio", + "pr": "https://github.com/kubestellar/console-kb/pull/6" + }, + "reactions": 3, + "comments": 11, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with istio installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:27:11.053Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/istio/istio-52320-do-not-add-pod-to-ipset-if-we-have-a-partial-failure-adding-to-the-d.json b/solutions/cncf-generated/istio/istio-52320-do-not-add-pod-to-ipset-if-we-have-a-partial-failure-adding-to-the-d.json new file mode 100644 index 00000000..b1ee2f16 --- /dev/null +++ b/solutions/cncf-generated/istio/istio-52320-do-not-add-pod-to-ipset-if-we-have-a-partial-failure-adding-to-the-d.json @@ -0,0 +1,85 @@ +{ + "version": "kc-mission-v1", + "name": "istio-52320-do-not-add-pod-to-ipset-if-we-have-a-partial-failure-adding-to-the-d", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "istio: Do not add pod to ipset if we have a \"partial failure\" adding to the dataplane.", + "description": "**Please provide a description of this PR:**\n\nShould fix: https://github.com/istio/istio/issues/52218 (@howardjohn repro'd locally)\n\nBasically, when we \"add\" a pod to the mesh, there are several things that (in order) can fail.\n\n1. We can fail to get the pod netns.\n2. We can fail to inject iptables\n3. We can fail to talk to ztunnel\n4. We can fail to annotate the pod (which officially indicates to the Istio CP that the pod is \"in ambient\")\n5. We can fail to add the pod to the hostipset, so healt", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "**Please provide a description of this PR:**\n\nShould fix: https://github.com/istio/istio/issues/52218 (@howardjohn repro'd locally)\n\nBasically, when we \"add\" a pod to the mesh, there are several thing" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nApplying: If pod enrollment fails in any way, do not add to ipset\nUsing index info to reconstruct a base tree...\nM\tcni/pkg/nodeagent/net.go\nM\tcni/pkg/nodeagent/server.go\nFalling back to patching base and 3-way merge...\nAuto-merging cni/pkg/nodeagent/server.go\nAuto-merging cni/pkg/nodeagent/net.go\nCONFLICT (content): Merge conflict in cni/pkg/nodeagent/net.go\nerror: Failed to merge in the changes.\nhint: Use 'git am --show-current-patch=diff' to see the failed patch\nPatch failed at 0001 If pod enr\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/istio/istio/pull/52329. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "This is an automated cherry-pick of #52320", + "codeSnippets": [ + "Applying: If pod enrollment fails in any way, do not add to ipset\nUsing index info to reconstruct a base tree...\nM\tcni/pkg/nodeagent/net.go\nM\tcni/pkg/nodeagent/server.go\nFalling back to patching base and 3-way merge...\nAuto-merging cni/pkg/nodeagent/server.go\nAuto-merging cni/pkg/nodeagent/net.go\nCONFLICT (content): Merge conflict in cni/pkg/nodeagent/net.go\nerror: Failed to merge in the changes.\nhint: Use 'git am --show-current-patch=diff' to see the failed patch\nPatch failed at 0001 If pod enr", + "Applying: If pod enrollment fails in any way, do not add to ipset\nUsing index info to reconstruct a base tree...\nM\tcni/pkg/nodeagent/net.go\nM\tcni/pkg/nodeagent/server.go\nFalling back to patching base and 3-way merge...\nAuto-merging cni/pkg/nodeagent/server.go\nAuto-merging cni/pkg/nodeagent/net.go\nCONFLICT (content): Merge conflict in cni/pkg/nodeagent/net.go\nerror: Failed to merge in the changes.\nhint: Use 'git am --show-current-patch=diff' to see the failed patch\nPatch failed at 0001 If pod enrollment fails in any way, do not add to ipset\nWhen you have resolved this problem, run \"git am --continue\".\nIf you prefer to skip this patch, run \"git am --skip\" instead.\nTo restore the original branch and stop patching, run \"git am --abort\".", + "Applying: If pod enrollment fails in any way, do not add to ipset\nUsing index info to reconstruct a base tree...\nM\tcni/pkg/nodeagent/net.go\nM\tcni/pkg/nodeagent/server.go\nFalling back to patching base and 3-way merge...\nAuto-merging cni/pkg/nodeagent/server.go\nAuto-merging cni/pkg/nodeagent/net.go\nCONFLICT (content): Merge conflict in cni/pkg/nodeagent/net.go\nerror: Failed to merge in the changes.\nhint: Use 'git am --show-current-patch=diff' to see the failed patch\nPatch failed at 0001 If pod enrollment fails in any way, do not add to ipset\nWhen you have resolved this problem, run \"git am --continue\".\nIf you prefer to skip this patch, run \"git am --skip\" instead.\nTo restore the original branch and stop patching, run \"git am --abort\"." + ] + } + }, + "metadata": { + "tags": [ + "istio", + "graduated", + "networking", + "troubleshoot", + "area-networking", + "size-xxl", + "area-ambient", + "cherrypick-release-1-22", + "cherrypick-release-1-23" + ], + "cncfProjects": [ + "istio" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/istio/istio/pull/52320", + "repo": "https://github.com/istio/istio", + "pr": "https://github.com/istio/istio/pull/52329" + }, + "reactions": 8, + "comments": 9, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with istio installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:26:48.637Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/istio/istio-55283-implement-gateway-waypoint-customization.json b/solutions/cncf-generated/istio/istio-55283-implement-gateway-waypoint-customization.json new file mode 100644 index 00000000..33bf8bde --- /dev/null +++ b/solutions/cncf-generated/istio/istio-55283-implement-gateway-waypoint-customization.json @@ -0,0 +1,81 @@ +{ + "version": "kc-mission-v1", + "name": "istio-55283-implement-gateway-waypoint-customization", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "istio: Implement Gateway/Waypoint customization", + "description": "Implements https://docs.google.com/document/d/13ZoxgR0CIaOuwyhA4xxI7L1XwsZaDVOmq13K8922zcU/edit?tab=t.0\n\nFixes https://github.com/istio/istio/issues/53964\nSolves the use case for https://github.com/istio/istio/issues/53473 (but doesn't directly implement what they asked for)\nFixes https://github.com/istio/istio/issues/46594\nMaybe resolves https://github.com/istio/istio/issues/54453 (not sure if you can _remove_ a port)\nFixes https://github.com/istio/istio/issues/53189\nFixes https://github.com/is", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Implements https://docs.google.com/document/d/13ZoxgR0CIaOuwyhA4xxI7L1XwsZaDVOmq13K8922zcU/edit?tab=t.0\n\nFixes https://github.com/istio/istio/issues/53964\nSolves the use case for https://github.com/is" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\napiVersion: gateway.networking.k8s.io/v1beta1\r\nkind: Gateway\r\nmetadata:\r\n name: gateway\r\nspec:\r\n infrastructure:\r\n parametersRef:\r\n group: \"\"\r\n kind: ConfigMap\r\n name: gw-options\r\n gatewayClassName: istio\r\n listeners:\r\n - name: default\r\n port: 80\r\n protocol: HTTP\r\n---\r\napiVersion: gateway.networking.k8s.io/v1beta1\r\nkind: HTTPRoute\r\nmetadata:\r\n name: echo\r\nspec:\r\n parentRefs:\r\n - name: gateway\r\n rules:\r\n - backendRefs:\r\n - name: echo\r\n port: 80\r\n---\r\napiV\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "> perhaps may want to be able to handle multiple configMaps as a infrastructure parameters ref. presumably people may want to mix and match elements and/or not have a giant configmap.\n\nCurrently the GW API spec only allows a single parametersRef.\n\nIstio does have some precedence for multiple in sidecars, though: https://istio.io/latest/docs/setup/additional-setup/sidecar-injection/#custom-templates-experimental. \n\n> presumably this would work for pdbs and services as well?\n\nYeah today you can customize the 3 types we create (deployment, Serviceaccount, service) and HPA (see note about HPA in description). 
We probably should do PDB as well as HPA the same way, I didn't add it since I wasn't sure if there would be consensus on adding HPA", + "codeSnippets": [ + "apiVersion: gateway.networking.k8s.io/v1beta1\r\nkind: Gateway\r\nmetadata:\r\n name: gateway\r\nspec:\r\n infrastructure:\r\n parametersRef:\r\n group: \"\"\r\n kind: ConfigMap\r\n name: gw-options\r\n gatewayClassName: istio\r\n listeners:\r\n - name: default\r\n port: 80\r\n protocol: HTTP\r\n---\r\napiVersion: gateway.networking.k8s.io/v1beta1\r\nkind: HTTPRoute\r\nmetadata:\r\n name: echo\r\nspec:\r\n parentRefs:\r\n - name: gateway\r\n rules:\r\n - backendRefs:\r\n - name: echo\r\n port: 80\r\n---\r\napiV", + "apiVersion: gateway.networking.k8s.io/v1beta1\r\nkind: Gateway\r\nmetadata:\r\n name: gateway\r\nspec:\r\n infrastructure:\r\n parametersRef:\r\n group: \"\"\r\n kind: ConfigMap\r\n name: gw-options\r\n gatewayClassName: istio\r\n listeners:\r\n - name: default\r\n port: 80\r\n protocol: HTTP\r\n---\r\napiVersion: gateway.networking.k8s.io/v1beta1\r\nkind: HTTPRoute\r\nmetadata:\r\n name: echo\r\nspec:\r\n parentRefs:\r\n - name: gateway\r\n rules:\r\n - backendRefs:\r\n - name: echo\r\n port: 80\r\n---\r\napiVersion: v1\r\nkind: ConfigMap\r\nmetadata:\r\n name: gw-options\r\ndata:\r\n horizontalPodAutoscaler: |\r\n spec:\r\n minReplicas: 2\r\n maxReplicas: 2\r\n\r\n deployment: |\r\n metadata:\r\n annotations:\r\n john-gw: hi\r\n spec:\r\n replicas: 4\r\n template:\r\n spec:\r\n containers:\r\n - name: istio-proxy\r\n resources:\r\n requests:\r\n cpu: 222m\r\n---\r\napiVersion: v1\r\nkind: ConfigMap\r\nmetadata:\r\n name: istio-default-gatewayclass-istio\r\n namespace: istio-system\r\ndata:\r\n deployment: |-\r\n metadata:\r\n annotations:\r\n john-gwc: hix", + "apiVersion: install.istio.io/v1alpha1\r\nkind: IstioOperator\r\nspec:\r\n values:\r\n gatewayClasses:\r\n istio:\r\n deployment:\r\n metadata:\r\n annotations:\r\n john: hi" + ] + } + }, + "metadata": { + "tags": [ + "istio", + "graduated", + "networking", + "troubleshoot", + "size-xl" + ], + "cncfProjects": [ + "istio" + ], + "targetResourceKinds": [ + "Pod", + "Deployment", + "Ingress", + "Configmap", + "Namespace", + "Horizontalpodautoscaler" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/istio/istio/pull/55283", + "repo": "https://github.com/istio/istio" + }, + "reactions": 9, + "comments": 16, + "synthesizedBy": "regex", + "qualityScore": 65 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with istio installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:26:45.803Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/k0s/k0s-1165-add-support-for-external-etcd-cluster.json b/solutions/cncf-generated/k0s/k0s-1165-add-support-for-external-etcd-cluster.json new file mode 100644 index 00000000..128f1aa8 --- /dev/null +++ b/solutions/cncf-generated/k0s/k0s-1165-add-support-for-external-etcd-cluster.json @@ -0,0 +1,75 @@ +{ + "version": "kc-mission-v1", + "name": "k0s-1165-add-support-for-external-etcd-cluster", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "k0s: Add support for external etcd cluster", + "description": "**Issue**\n\n**What this PR Includes**\nThis PR aims to enable users to use external etcd cluster as a storage for k0s. To do that a user has to define the following configuration:\n```\n storage:\n etcd:\n externalCluster:\n endpoints:\n - http://192.168.10.1:2379\n - http://192.168.10.2:2379\n - http://192.168.10.3:2379\n etcdPrefix: k0s-tenant\n type: etcd\n```\nField `endpoints` contains list of URLs that listen on for client requests. `etcdPrefix` is used to ", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "**Issue**\n\n**What this PR Includes**\nThis PR aims to enable users to use external etcd cluster as a storage for k0s. To do that a user has to define the following configuration:\n```\n storage:\n etc" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nstorage:\r\n etcd:\r\n externalCluster:\r\n endpoints:\r\n - http://192.168.10.1:2379\r\n - http://192.168.10.2:2379\r\n - http://192.168.10.3:2379\r\n etcdPrefix: k0s-tenant\r\n type: etcd\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "> In my opinion the first option gives more context and is more intuitive, so I vote for the first option.\n\nI agree, feels more intuitive IMO too.\n\n> Can TLS be optional in case of external cluster or always required?\n\nTheoretically one can configure etcd not to use any TLS, I don't think many people do it though. So I'd say they can be optional.\n\n> I'm going to not support k0s etcd member-list and k0s etcd leave when external cluster is used, because I think that if a user uses an external cluster, we can assume that he knows how to manage this cluster and he has tools to list members. When it comes to leave command, k0s should not support this, because it simply doesn't make sense. Do you agree with me?\n\nYes, I agree.\n\n> We have to decide if etcdPrefix should be available also for internal cluster. I don't see a need to use a prefix in this case, but maybe you know some use case?\n\nI do not know any use case where this would be required. So IMO ok to have it only on the `externalCluster`.\n\n> What do you think about the name of etcdPrefix property? 
I named it so, to point out it's used as --etcd-prefix, but maybe it could be just prefix or pathPrefix?\n\nBoth `etcdPrefix` and `pathPrefix` sound ok to me.", + "codeSnippets": [ + "storage:\r\n etcd:\r\n externalCluster:\r\n endpoints:\r\n - http://192.168.10.1:2379\r\n - http://192.168.10.2:2379\r\n - http://192.168.10.3:2379\r\n etcdPrefix: k0s-tenant\r\n type: etcd", + "storage:\r\n etcd:\r\n externalCluster:\r\n endpoints:\r\n - http://192.168.10.1:2379\r\n - http://192.168.10.2:2379\r\n - http://192.168.10.3:2379\r\n etcdPrefix: k0s-tenant\r\n type: etcd", + "storage:\r\n type: etcd\r\n etcd:\r\n externalCluster: null\r\n peerAddress: 192.168.68.104", + "storage:\r\n type: etcd\r\n etcd:\r\n externalCluster:\r\n endpoints:\r\n - http://192.168.68.104:2379\r\n - http://192.168.68.105:2379\r\n etcdPrefix: k0s-tenant\r\n caFile: /etc/pki/CA/ca.crt\r\n clientCertFile: /etc/pki/tls/certs/etcd-client.crt\r\n clientKeyFile: /etc/pki/tls/private/etcd-client.key\r\n peerAddress: 192.168.68.104", + "storage:\r\n type: etcd\r\n etcd:\r\n endpoints:\r\n - http://192.168.68.104:2379\r\n - http://192.168.68.105:2379\r\n etcdPrefix: k0s-tenant\r\n caFile: /etc/pki/CA/ca.crt\r\n clientCertFile: /etc/pki/tls/certs/etcd-client.crt\r\n clientKeyFile: /etc/pki/tls/private/etcd-client.key\r\n peerAddress: 192.168.68.104" + ] + } + }, + "metadata": { + "tags": [ + "k0s", + "sandbox", + "orchestration", + "troubleshoot" + ], + "cncfProjects": [ + "k0s" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/k0sproject/k0s/pull/1165", + "repo": "https://github.com/k0sproject/k0s" + }, + "reactions": 2, + "comments": 13, + "synthesizedBy": "regex", + "qualityScore": 65 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with k0s installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:40:12.000Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/k3s/k3s-141-preload-a-docker-image-on-the-k3s-node-agents.json b/solutions/cncf-generated/k3s/k3s-141-preload-a-docker-image-on-the-k3s-node-agents.json new file mode 100644 index 00000000..b4cc2a07 --- /dev/null +++ b/solutions/cncf-generated/k3s/k3s-141-preload-a-docker-image-on-the-k3s-node-agents.json @@ -0,0 +1,74 @@ +{ + "version": "kc-mission-v1", + "name": "k3s-141-preload-a-docker-image-on-the-k3s-node-agents", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "k3s: preload a docker image on the k3s node agents", + "description": "This PR adds preloading existing container images ( `docker save` format ) from `/var/lib/rancher/k3s/agent/images`", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This PR adds preloading existing container images ( `docker save` format ) from `/var/lib/rancher/k3s/agent/images`" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n$ sudo k3s crictl images\r\nIMAGE TAG IMAGE ID SIZE\r\ndocker.io/library/myimage latest 652014e0a66b3 82MB\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "> \n> \n> I have added images which I exported with `docker save` to `/var/lib/rancher/k3s/agent/images` and then restarted k3s. In my k8s deployment I've added the image name to `-image: ` , but k3s still tries to fetch the image from docker.io.\n> \n> I get the following error.\n> \n> `\"rpc error: code = Unknown desc = failed to resolve image \\\"docker.io/library...`\n> \n> This should work right?\n\nHi, I just used this feature today. It looks like an image tagged as `myimage` in docker would result, in the `containerd` embedded in k3s, as :\n\n docker.io/library/myimage\n\nThis can be observed through this command:\n\n```\n$ sudo k3s crictl images\nIMAGE TAG IMAGE ID SIZE\ndocker.io/library/myimage latest 652014e0a66b3 82MB\n```", + "codeSnippets": [ + "$ sudo k3s crictl images\r\nIMAGE TAG IMAGE ID SIZE\r\ndocker.io/library/myimage latest 652014e0a66b3 82MB", + "$ sudo k3s crictl images\r\nIMAGE TAG IMAGE ID SIZE\r\ndocker.io/library/myimage latest 652014e0a66b3 82MB" + ] + } + }, + "metadata": { + "tags": [ + "k3s", + "sandbox", + "orchestration", + "troubleshoot" + ], + "cncfProjects": [ + "k3s" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/k3s-io/k3s/pull/141", + "repo": "https://github.com/k3s-io/k3s" + }, + "reactions": 2, + "comments": 13, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with k3s installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:40:17.384Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/k3s/k3s-3049-add-dual-stack-support-to-k3s.json b/solutions/cncf-generated/k3s/k3s-3049-add-dual-stack-support-to-k3s.json new file mode 100644 index 00000000..89db716c --- /dev/null +++ b/solutions/cncf-generated/k3s/k3s-3049-add-dual-stack-support-to-k3s.json @@ -0,0 +1,102 @@ +{ + "version": "kc-mission-v1", + "name": "k3s-3049-add-dual-stack-support-to-k3s", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "k3s: Add dual stack support to K3S", + "description": "Problem: Although Kubernetes does support dual stack, K3S does not\n\nSolution: Modify K3S so it can both support parameters for dual stack,\nand pass them to the different Kubernetes daemons\n\n#### Proposed Changes ####\n\nAdd dual stack support to K3S\n\n#### Types of Changes ####\n\nNew Feature\n\n#### Verification ####\nYou'll need to start K3S with the `DualStack` feature flag. Also you'll need a different CNI than flannel, because it does not support dual stack (yet). I used Cilium and it worked fine. ", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Problem: Although Kubernetes does support dual stack, K3S does not\n\nSolution: Modify K3S so it can both support parameters for dual stack,\nand pass them to the different Kubernetes daemons\n\n#### Propo" + }, + { + "title": "Only ipv4 stack as the origin.", + "description": "Only ipv4 stack as the origin." 
+ }, + { + "title": "Only ipv6 stack", + "description": "Only ipv6 stack" + }, + { + "title": "Dual stack", + "description": "Dual stack" + }, + { + "title": "setting flanneld daemon with `--kube-subnet-mgr --auto-detect-ipv6`", + "description": "setting flanneld daemon with `--kube-subnet-mgr --auto-detect-ipv6`" + }, + { + "title": "settting `EnableIPv6` and `IPv6Network` in `net-conf.json`, like the", + "description": "settting `EnableIPv6` and `IPv6Network` in `net-conf.json`, like the" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n/usr/local/bin/k3s server --disable traefik –-flannel-backend=none --disable-network-policy --disable servicelb --no-flannel --disable-kube-proxy \\\r\n\t\t--node-ip \\\r\n\t\t--node-ip \\\r\n\t\t--kube-apiserver-arg 'feature-gates=IPv6DualStack=true' \\\r\n\t\t--kube-controller-manager-arg 'feature-gates=IPv6DualStack=true' \\\r\n\t\t--kubelet-arg 'feature-gates=IPv6DualStack=true' \\\r\n\t\t--cluster-cidr ',' \\\r\n\t\t--service-cidr ',= 3.12.\n\nIt also need flannel cni plugin to support dual stack ip allocation, so it \ndepends on: [https://github.com/containernetworking/plugins/pull/570](https://github.com/containernetworking/plugins/pull/570 )\n\n#248", + "codeSnippets": [ + "/usr/local/bin/k3s server --disable traefik –-flannel-backend=none --disable-network-policy --disable servicelb --no-flannel --disable-kube-proxy \\\r\n\t\t--node-ip \\\r\n\t\t--node-ip \\\r\n\t\t--kube-apiserver-arg 'feature-gates=IPv6DualStack=true' \\\r\n\t\t--kube-controller-manager-arg 'feature-gates=IPv6DualStack=true' \\\r\n\t\t--kubelet-arg 'feature-gates=IPv6DualStack=true' \\\r\n\t\t--cluster-cidr ',' \\\r\n\t\t--service-cidr ', \\\r\n\t\t--node-ip \\\r\n\t\t--kube-apiserver-arg 'feature-gates=IPv6DualStack=true' \\\r\n\t\t--kube-controller-manager-arg 'feature-gates=IPv6DualStack=true' \\\r\n\t\t--kubelet-arg 'feature-gates=IPv6DualStack=true' \\\r\n\t\t--cluster-cidr ',' \\\r\n\t\t--service-cidr ',' \\", + "{\r\n \"EnableIPv4\": true,\r\n \"EnableIPv6\": true,\r\n \"Network\": \"172.16.0.0/16\",\r\n \"IPv6Network\": \"fc00::/48\",\r\n \"Backend\": {\r\n \"Type\": \"vxlan\"\r\n }\r\n}", + "enable-ipv6: \"true\"\r\n cluster-pool-ipv6-cidr: \"fd00:5000::/64\" \r\n cluster-pool-ipv6-mask-size: \"64\"" + ] + } + }, + "metadata": { + "tags": [ + "k3s", + "sandbox", + "orchestration", + "troubleshoot" + ], + "cncfProjects": [ + "k3s" + ], + "targetResourceKinds": [ + "Service", + "Node" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/k3s-io/k3s/pull/3049", + "repo": "https://github.com/k3s-io/k3s", + "pr": "https://github.com/flannel-io/flannel/pull/1398" + }, + "reactions": 3, + "comments": 12, + "synthesizedBy": "regex", + "qualityScore": 69 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with k3s installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:40:16.011Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keda/keda-2418-add-predictkube-scaler.json b/solutions/cncf-generated/keda/keda-2418-add-predictkube-scaler.json new file mode 100644 index 00000000..9084ebb3 --- /dev/null +++ b/solutions/cncf-generated/keda/keda-2418-add-predictkube-scaler.json @@ -0,0 +1,81 @@ +{ + "version": "kc-mission-v1", + "name": "keda-2418-add-predictkube-scaler", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "keda: Add PredictKube scaler", + "description": "This PR adds a new PredictKube scaler, ready-to-use for the community.\nPredictKube - is a tool for proactive scaling based on the AI model’s prediction. \n\nRelated docs PR: [https://github.com/kedacore/keda-docs/pull/617](https://github.com/kedacore/keda-docs/pull/617)\n\nThis is an example of what the TriggerAuthentication and the ScaledObject definitions would look like:\n```yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: predictkube-secrets\ntype: Opaque\ndata:\n apiKey: # Required: base64 encod", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This PR adds a new PredictKube scaler, ready-to-use for the community.\nPredictKube - is a tool for proactive scaling based on the AI model’s prediction. \n\nRelated docs PR: [https://github.com/kedacore" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\napiVersion: v1\r\nkind: Secret\r\nmetadata:\r\n name: predictkube-secrets\r\ntype: Opaque\r\ndata:\r\n apiKey: # Required: base64 encoded value of PredictKube apiKey\r\n---\r\napiVersion: keda.sh/v1alpha1\r\nkind: TriggerAuthentication\r\nmetadata:\r\n name: keda-trigger-auth-predictkube-secret\r\nspec:\r\n secretTargetRef:\r\n - parameter: apiKey\r\n name: predictkube-secrets\r\n key: apiKey\r\n---\r\napiVersion: keda.sh/v1alpha1\r\nkind: ScaledObject\r\nmetadata:\r\n name: example-app-scaler\r\nspec:\r\n scaleTargetRef:\r\n \n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kedacore/keda-docs/pull/617. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "This PR adds documentation for a new PredictKube Scaler.\n\n### Checklist\n\nRelates to https://github.com/kedacore/keda/pull/2418 & https://github.com/kedacore/keda/issues/2458", + "codeSnippets": [ + "apiVersion: v1\r\nkind: Secret\r\nmetadata:\r\n name: predictkube-secrets\r\ntype: Opaque\r\ndata:\r\n apiKey: # Required: base64 encoded value of PredictKube apiKey\r\n---\r\napiVersion: keda.sh/v1alpha1\r\nkind: TriggerAuthentication\r\nmetadata:\r\n name: keda-trigger-auth-predictkube-secret\r\nspec:\r\n secretTargetRef:\r\n - parameter: apiKey\r\n name: predictkube-secrets\r\n key: apiKey\r\n---\r\napiVersion: keda.sh/v1alpha1\r\nkind: ScaledObject\r\nmetadata:\r\n name: example-app-scaler\r\nspec:\r\n scaleTargetRef:", + "apiVersion: v1\r\nkind: Secret\r\nmetadata:\r\n name: predictkube-secrets\r\ntype: Opaque\r\ndata:\r\n apiKey: # Required: base64 encoded value of PredictKube apiKey\r\n---\r\napiVersion: keda.sh/v1alpha1\r\nkind: TriggerAuthentication\r\nmetadata:\r\n name: keda-trigger-auth-predictkube-secret\r\nspec:\r\n secretTargetRef:\r\n - parameter: apiKey\r\n name: predictkube-secrets\r\n key: apiKey\r\n---\r\napiVersion: keda.sh/v1alpha1\r\nkind: ScaledObject\r\nmetadata:\r\n name: example-app-scaler\r\nspec:\r\n scaleTargetRef:\r\n name: example-app\r\n pollingInterval: 60\r\n cooldownPeriod: 300\r\n minReplicaCount: 3\r\n maxReplicaCount: 50\r\n triggers:\r\n - type: predictkube\r\n metadata:\r\n predictHorizon: \"2h\"\r\n historyTimeWindow: \"7d\" # We recomend to use minimum 7-14 days time window as historical data\r\n prometheusAddress: http://kube-prometheus-stack-prometheus.monitoring:9090\r\n metricName: http_requests_total # Note: name to identify the metric, generated value would be `predictkube-http_requests_total`\r\n query: sum(irate(http_requests_total{pod=~\"example-app-.*\"}[2m]))\r\n queryStep: \"2m\" # Note: query step duration for range prometheus queries\r\n threshold: '2000' # Value to start scaling for\r\n authenticationRef:\r\n name: keda-trigger-auth-predictkube-secret" + ] + } + }, + "metadata": { + "tags": [ + "keda", + "graduated", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "keda" + ], + "targetResourceKinds": [ + "Pod", + "Deployment", + "Secret" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/kedacore/keda/pull/2418", + "repo": "https://github.com/kedacore/keda", + "pr": "https://github.com/kedacore/keda-docs/pull/617" + }, + "reactions": 11, + "comments": 11, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with keda installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:27:39.890Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keda/keda-6191-add-scaler-for-temporal.json b/solutions/cncf-generated/keda/keda-6191-add-scaler-for-temporal.json new file mode 100644 index 00000000..8b1b32e3 --- /dev/null +++ b/solutions/cncf-generated/keda/keda-6191-add-scaler-for-temporal.json @@ -0,0 +1,81 @@ +{ + "version": "kc-mission-v1", + "name": "keda-6191-add-scaler-for-temporal", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "keda: add scaler for temporal", + "description": "Implement a temporal scaler\n\n### Checklist\n\n- [N/A] A PR is opened to update our Helm chart ([repo](https://github.com/kedacore/charts)) *(if applicable, ie. when deployment manifests are modified)*\n\nDo", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Implement a temporal scaler\n\n### Checklist\n\n- [N/A] A PR is opened to update our Helm chart ([repo](https://github.com/kedacore/charts)) *(if applicable, ie. when deployment manifests are modified)*\n\n" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n2024-10-08T08:54:27Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:54:57Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:55:2\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kedacore/keda/pull/4863. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "Implement a temporal scaler \n\n### Checklist\n\nRelates to #4724", + "codeSnippets": [ + "2024-10-08T08:54:27Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:54:57Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:55:2", + "2024-10-08T08:54:27Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:54:57Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:55:27Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:55:57Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:56:27Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:56:57Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:57:27Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:57:57Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:57:57Z INFO scaleexecutor Successfully updated ScaleTarget {\"scaledobject.Name\": \"connector-mysql-2d9c6761\", \"scaledObject.Namespace\": \"borneo\", \"scaleTarget.Name\": \"connector-mysql-2d9c6761\", \"Original Replicas Count\": 0, \"New Replicas Count\": 1}\r\n2024-10-08T08:57:58Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:58:13Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:58:27Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": 
\"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:58:28Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:58:43Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:58:57Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:58:58Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:59:13Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}\r\n2024-10-08T08:59:27Z INFO temporal_scaler Getting Temporal queue size {\"type\": \"ScaledObject\", \"namespace\": \"borneo\", \"name\": \"connector-mysql-2d9c6761\", \"queueName\": \"connector/devdp-breo/2d9c6761-7c8c-4958-8d94-767064f873f5\"}", + "temporal_test.go:268:\r\n \tError Trace:\t/Users/robholland/Developer/github.com/kedacore/keda/tests/scalers/temporal/temporal_test.go:268\r\n \t \t\t\t\t/Users/robholland/Developer/github.com/kedacore/keda/tests/scalers/temporal/temporal_test.go:241\r\n \tError: \tShould be true\r\n \tTest: \tTestTemporalScaler\r\n \tMessages: \treplica count should be 1 after 3 minutes", + "> temporal_test.go:268:\r\n> \tError Trace:\t/Users/robholland/Developer/github.com/kedacore/keda/tests/scalers/temporal/temporal_test.go:268\r\n> \t \t\t\t\t/Users/robholland/Developer/github.com/kedacore/keda/tests/scalers/temporal/temporal_test.go:241\r\n> \tError: \tShould be true\r\n> \tTest: \tTestTemporalScaler\r\n> \tMessages: \treplica count should be 1 after 3 minutes\r\n>" + ] + } + }, + "metadata": { + "tags": [ + "keda", + "graduated", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "keda" + ], + "targetResourceKinds": [ + "Deployment" + ], + "difficulty": "advanced", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/kedacore/keda/pull/6191", + "repo": "https://github.com/kedacore/keda", + "pr": "https://github.com/kedacore/keda/pull/4863" + }, + "reactions": 13, + "comments": 43, + "synthesizedBy": "regex", + "qualityScore": 62 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with keda installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:27:37.943Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/keylime/keylime-319-update-verifier-rest-api-to-return-error-for-invalid-exclude-list.json b/solutions/cncf-generated/keylime/keylime-319-update-verifier-rest-api-to-return-error-for-invalid-exclude-list.json new file mode 100644 index 00000000..b6302b52 --- /dev/null +++ b/solutions/cncf-generated/keylime/keylime-319-update-verifier-rest-api-to-return-error-for-invalid-exclude-list.json @@ -0,0 +1,76 @@ +{ + "version": "kc-mission-v1", + "name": "keylime-319-update-verifier-rest-api-to-return-error-for-invalid-exclude-list", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "keylime: Update verifier REST API to return error for invalid exclude list", + "description": "Exclude list example:\n\n```shell\n[root@keylime keylime-dev]# cat ~/excludes.txt\n\n*\n```\n\nError from the verifier:\n```shell\n2020-06-05 20:40:21.875 - keylime.cloudverifier - WARNING - Invalid regex: nothing to repeat. Exclude list regex is misformatted. Please correct the issue and try again.\n```\n\nError from the tenant:\n```shell\n[root@keylime keylime-dev]# python keylime/cmd/tenant.py -v 127.0.0.1 -t 127.0.0.1 -tp 9002 -f /root/excludes.txt -u D432FBB3-D2F1-4A97-9EF7-75BD81C00000 --whitelist /root/", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Exclude list example:\n\n```shell\n[root@keylime keylime-dev]# cat ~/excludes.txt\n\n*\n```\n\nError from the verifier:\n```shell\n2020-06-05 20:40:21.875 - keylime.cloudverifier - WARNING - Invalid regex: noth" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n[root@keylime keylime-dev]# cat ~/excludes.txt\r\n/root/keylime-dev/.*\r\n*\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "> but at the same time I would hope regular expressions would help make the exclude list more concise.\n\nI agree, this is an edge case so I don't think we should sweat over this one for long. \nI have entries like this:\n\n```\nfb01d3787135d1fbc1c333f02edcf5c9ff59f864f3f4d3e51a1cc0315b764bcd /etc/fonts/conf.d/65-nonlatin.conf\n0fe8db4f86a29df2c348b009a26aeb1f7b0686315e7310e3301e1d83466af23b /etc/fonts/conf.d/25-no-bitmap-fedora.conf\n1a17eaeb44bdc367f16939dfb2ca1a00a57a9b788b9057a80c656667fa41721e /etc/fonts/conf.d/45-latin.conf\n54a5935ea79c66e07faf743b81f2647eca849800b2b0422e84cd66527a75066a /etc/fonts/conf.d/90-synthetic.conf\n1fac58aa3c865e9b05088f94f7a4684d8a60d134421690cfd04842eb00fe1332 /etc/fonts/conf.d/10-hinting-slight.conf\n```\n\nIf a user entered this as a bug, we would tell them to use `/etc/fonts/conf.d/*`. I would rather educate users then take a performance hit. The main thing for me is that we keep mangled regex out, and you have achieved that goal with this patch.\n\nIs it possible to just return `Exclude list regex is misformatted. Please correct the issue and try again`, but without the regex, e.g. 
remove `'(/root/keylime-dev/.*)|(*)'` to use your example:\n\n```\n[root@keylime keylime-dev]# python keylime/cmd/tenant.py -v 127.0.0.1 -t 127.0.0.1 -tp 9002 -f /root/excludes.txt -u D432FBB3-D2F1-4A97-9EF7-75BD81C00000 --whitelist /root/whitelist.txt --exclude /root/excludes.txt -c add\nUsing config file /root/keylime-dev/keylime.conf\n2020-06-03 05:49:59.202 - keylime.te", + "codeSnippets": [ + "[root@keylime keylime-dev]# cat ~/excludes.txt\n\n*", + "[root@keylime keylime-dev]# cat ~/excludes.txt\r\n/root/keylime-dev/.*\r\n*", + "[root@keylime keylime-dev]# cat ~/excludes.txt\r\n/root/keylime-dev/.*\r\n*", + "2020-06-05 20:40:21.875 - keylime.cloudverifier - WARNING - Invalid regex: nothing to repeat. Exclude list regex is misformatted. Please correct the issue and try again.", + "[root@keylime keylime-dev]# python keylime/cmd/tenant.py -v 127.0.0.1 -t 127.0.0.1 -tp 9002 -f /root/excludes.txt -u D432FBB3-D2F1-4A97-9EF7-75BD81C00000 --whitelist /root/whitelist.txt --exclude /root/excludes.txt -c add\r\nUsing config file /root/keylime-dev/keylime.conf\r\n2020-06-05 20:40:20.539 - keylime.tenant - WARNING - CAUTION: using default password for private key, please set private_key_pw to a strong password\r\n2020-06-05 20:40:20.539 - keylime.tenant - INFO - Setting up client TLS in /var/lib/keylime/cv_ca\r\n2020-06-05 20:40:20.546 - keylime.tenant - INFO - TPM PCR Mask from policy is 0x408000\r\n2020-06-05 20:40:20.546 - keylime.tenant - INFO - TPM PCR Mask from policy is 0x808000\r\n2020-06-05 20:40:20.671 - keylime.ima - WARNING - No boot_aggregate value found in whitelist, adding an empty one\r\n2020-06-05 20:40:21.876 - keylime.tenant - ERROR - Response code 400: Invalid regex: nothing to repeat. Exclude list regex is misformatted. Please correct the issue and try again.\r\n2020-06-05 20:40:21.876 - keylime.tenant - ERROR - POST command response: 400 Unexpected response from Cloud Verifier: {'code': 400, 'status': 'Invalid regex: nothing to repeat. Exclude list regex is misformatted. Please correct the issue and try again.', 'results': {}}" + ] + } + }, + "metadata": { + "tags": [ + "keylime", + "sandbox", + "app-definition", + "troubleshoot", + "released" + ], + "cncfProjects": [ + "keylime" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/keylime/keylime/pull/319", + "repo": "https://github.com/keylime/keylime" + }, + "reactions": 1, + "comments": 11, + "synthesizedBy": "regex", + "qualityScore": 64 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with keylime installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:40:57.676Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kmesh/kmesh-1094-update-hostname-and-record-unknown-services-of-service-metrics.json b/solutions/cncf-generated/kmesh/kmesh-1094-update-hostname-and-record-unknown-services-of-service-metrics.json new file mode 100644 index 00000000..1016786f --- /dev/null +++ b/solutions/cncf-generated/kmesh/kmesh-1094-update-hostname-and-record-unknown-services-of-service-metrics.json @@ -0,0 +1,89 @@ +{ + "version": "kc-mission-v1", + "name": "kmesh-1094-update-hostname-and-record-unknown-services-of-service-metrics", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "kmesh: update hostname and record unknown services of service metrics", + "description": "**What type of PR is this?**\n\n**What this PR does / why we need it**:\n\n**Which issue(s) this PR fixes**:\n\nHere are some issues tha remain to be disccused:\n- When a service is not found when building service metrics, I record the `dstIp` in `destination_service`, I'm not sure if it is appropriate.\n- When recording waypoint metrics, services can not found since the `dstIp` is 15019, but only 15021 and 15008 are defined in waypoint services, so the following logic fails every time. Ztunnel uses act", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "**What type of PR is this?**\n\n**What this PR does / why we need it**:\n\n**Which issue(s) this PR fixes**:\n\nHere are some issues tha remain to be disccused:\n- When a service is not found when building s" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n- And a small question that why the metric are separated into service metric and workload metric?\r\n\r\n**Special notes for your reviewer**:\r\n\r\n**Does this PR introduce a user-facing change?**:\r\n\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kmesh-net/kmesh/pull/1101. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "**What type of PR is this?**\n\n**What this PR does / why we need it**:\nAdd kiali addon sample and adjust prometheus_recording_istio sample for a better view of kiali\n**Which issue(s) this PR fixes**:\n\n**Special notes for your reviewer**:\nThis PR mainly fixes of the already existed `./samples/addon/prometheus_recoding_istio.yaml` and further provide a `kiali.yaml` for a sample Kiali deploy.\nWe can get a pretty decent sample traffic graph in Kiali with this and #1094 both resolved. 
The final look of the traffic graph will be like this\n\"image\"\n\n**Does this PR introduce a user-facing change?**:", + "codeSnippets": [ + "- And a small question that why the metric are separated into service metric and workload metric?\r\n\r\n**Special notes for your reviewer**:\r\n\r\n**Does this PR introduce a user-facing change?**:\r\n", + "- And a small question that why the metric are separated into service metric and workload metric?\r\n\r\n**Special notes for your reviewer**:\r\n\r\n**Does this PR introduce a user-facing change?**:\r\n", + "Seems like when kiali drawing waypoint, it connect the waypoint to the destination service using the `destination_service_name` tag, but if we use the waypoint service as `destination_service_name`, I'm not sure if kiali will work as expected.\r\n\r\n![image](https://github.com/user-attachments/assets/9cbae83f-37c9-4884-b67a-f127e5ac8d2f)\r\n\nCan we add actual destination information in `tcp_probe_info`? Maybe add `actual_daddr` and `actual_dport` in the `bpf_sock_tuple` struct so that we can know if the traffic is redirected? (Although this information is only need in metrics, for now)", + ">When recording waypoint metrics, services can not found since the dstIp is 15019, but only 15021 and 15008 are defined in waypoint services, so the following logic fails every time. Ztunnel uses actual service as destination service instead of the waypoint service, but seems like kmesh can not achieve that. I'm not sure how to handle this situation.\r\n\r\n\r\nNow we support deploying waypoint via kmeshctl, why cannot we expose 15019 in the `gateway` object cc @YaoZengzeng \r\n\n> It is a little hard to review, can you do the cleanup/refact in a separate pr, can keep the critical fix here\r\n\r\nThis pr only contains the critical fix, but I made some refactorings to make the code tidy.\r\n\r\nFor example, I extract the following code to a new method `withDestination` since this logic is duplicated in `buildWorkloadMetric` and `buildServiceMetric`. Maybe I can make some explanation in the community meeting? 🤔" + ] + } + }, + "metadata": { + "tags": [ + "kmesh", + "sandbox", + "app-definition", + "troubleshoot", + "kind-bug", + "lgtm", + "approved", + "size-xl" + ], + "cncfProjects": [ + "kmesh" + ], + "targetResourceKinds": [ + "Pod", + "Service", + "Job", + "Namespace", + "Node" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/kmesh-net/kmesh/pull/1094", + "repo": "https://github.com/kmesh-net/kmesh", + "pr": "https://github.com/kmesh-net/kmesh/pull/1101" + }, + "reactions": 1, + "comments": 10, + "synthesizedBy": "regex", + "qualityScore": 60 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with kmesh installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:41:11.491Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/knative-eventing/knative-eventing-5818-moving-placeable-type-and-new-scheduler-to-eventing-core.json b/solutions/cncf-generated/knative-eventing/knative-eventing-5818-moving-placeable-type-and-new-scheduler-to-eventing-core.json new file mode 100644 index 00000000..81ba547e --- /dev/null +++ b/solutions/cncf-generated/knative-eventing/knative-eventing-5818-moving-placeable-type-and-new-scheduler-to-eventing-core.json @@ -0,0 +1,87 @@ +{ + "version": "kc-mission-v1", + "name": "knative-eventing-5818-moving-placeable-type-and-new-scheduler-to-eventing-core", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "knative-eventing: Moving Placeable type and new Scheduler to eventing core", + "description": "Fixes https://github.com/knative-sandbox/eventing-kafka/issues/925\n\n## Proposed Changes\n\n- :gift: Controller reads and validates the policy profile (containing predicates and priorities) for both scheduling and descheduling from ConfigMaps.\n- :gift: Scheduler imports all the available plugins which invokes their init() and registers all the core and source specific (kafka) plugins.\n- :gift: Scheduler runs all the predicates and priorities specified in the policy (configmap) file to eliminate all", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Fixes https://github.com/knative-sandbox/eventing-kafka/issues/925\n\n## Proposed Changes\n\n- :gift: Controller reads and validates the policy profile (containing predicates and priorities) for both sche" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n**Docs**\r\n\r\n\r\n\r\n\nSigned-off-by: aavarghese avarghese@us.ibm.com\r\n\r\nFixes knative-sandbox/eventing-kafka#925\r\n\r\nSee https://github.com/knative/eventing/pull/5818, https://github.com/knative-sandbox/eventing-kafka/pull/768\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\nSigned-off-by: aavarghese avarghese@us.ibm.com\r\n\r\nFixes knative-sandbox/eventing-kafka#925\r\n\r\nSee https://github.com/knative/eventing/pull/5818, https://github.com/knative-sandbox/eventing-kafka/pull/768\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\nSigned-off-by: aavarghese avarghese@us.ibm.com\r\n\r\nFixes knative-sandbox/eventing-kafka#925\r\n\r\nSee https://github.com/knative/eventing/pull/5818, https://github.com/knative-sandbox/eventing-kafka/pull/768\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n**Release Note**\r\n\r\n", + "\r\n\n@aavarghese: GitHub didn't allow me to request PR reviews from the following users: starting, the, in, PR, for, eventing, move, this.\n\nNote that only [knative members](https://github.com/orgs/knative/people) and repo collaborators can review this PR, and authors cannot review their own PRs.\n\n
\n/cc @lionelvillard @pierDipi\r\nstarting the move in this PR for eventing\n# [Codecov](https://codecov.io/gh/knative/eventing/pull/5818?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) Report\n> Merging [#5818](https://codecov.io/gh/knative/eventing/pull/5818?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) (2e1efc1) into [main](https://codecov.io/gh/knative/eventing/commit/c804fd2b94fbd70c2639eea37e8e69a8933cc193?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) (c804fd2) will **decrease** coverage by `0.34%`.\n> The diff coverage is `79.44%`.\n\n> :exclamation: Current head 2e1efc1 differs from pull request most recent head 31a3c56. Consider uploading reports for the commit 31a3c56 to get more accurate results\n[![Impacted file tree graph](https://codecov.io/gh/knative/eventing/pull/5818/graphs/tree.svg?width=650&height=150&src=pr&token=PKyIzvr3mx&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative)](https://codecov.io/gh/knative/eventing/pull/5818?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative)" + ] + } + }, + "metadata": { + "tags": [ + "knative-eventing", + "graduated", + "app-definition", + "troubleshoot", + "approved", + "lgtm", + "size-xxl", + "cla--yes" + ], + "cncfProjects": [ + "knative-eventing" + ], + "targetResourceKinds": [ + "Pod", + "Configmap", + "Statefulset", + "Node" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/knative/eventing/pull/5818", + "repo": "https://github.com/knative/eventing", + "pr": "https://github.com/knative-extensions/eventing-kafka/pull/942" + }, + "reactions": 2, + "comments": 19, + "synthesizedBy": "regex", + "qualityScore": 62 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with knative-eventing installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:28:41.721Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/knative-eventing/knative-eventing-7525-authenticate-requests-from-pingsources.json b/solutions/cncf-generated/knative-eventing/knative-eventing-7525-authenticate-requests-from-pingsources.json new file mode 100644 index 00000000..2a258ac3 --- /dev/null +++ b/solutions/cncf-generated/knative-eventing/knative-eventing-7525-authenticate-requests-from-pingsources.json @@ -0,0 +1,87 @@ +{ + "version": "kc-mission-v1", + "name": "knative-eventing-7525-authenticate-requests-from-pingsources", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "knative-eventing: Authenticate requests from PingSources", + "description": "## Proposed Changes\n\n(Prerequisites: OIDC mode is enabled, and sink has a defined audience)\n- Binds the source's service account to allow the source to create a JWT token\n- Edited existing tests to add sinkAudience\n\n### Pre-review Checklist\n\n**Release Note**\n\n**Docs**", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "## Proposed Changes\n\n(Prerequisites: OIDC mode is enabled, and sink has a defined audience)\n- Binds the source's service account to allow the source to create a JWT token\n- Edited existing tests to ad" + }, + { + "title": "You need to set the TokenProvider in the adapter.ClientConfig, which gets use...", + "description": "You need to set the TokenProvider in the adapter.ClientConfig, which gets used by the adapter in https://github.com/knative/eventing/blob/c28078eca2a1ec94138794f64f274d25e6292354/pkg/adapter/mtping/runner.go#L211-L217" + }, + { + "title": "You need to pass/set the Audience and OIDCServiceAccountName in the env struc...", + "description": "You need to pass/set the Audience and OIDCServiceAccountName in the env struct, which gets passed to the ClientConfig [here](https://github.com/knative/eventing/blob/c28078eca2a1ec94138794f64f274d25e6292354/pkg/adapter/mtping/runner.go#L212) too. You can get both from the source status (`source.Status.SinkAudience` and `source.Status.Auth.ServiceAccountName`). (Hint: currently, we are missing to set the sink audience correctly. This is addressed in #7553. So you might need to rebase after it merged)" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n
\n\n[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/knative/eventing/pull/7525?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative). \n:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative).\n\nHello @Zazzscoot,\r\nthanks for your update. I\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Hello @Zazzscoot,\nthanks for your update. It seems the e2e test failures are because you're missing the implementation for the pingsource:\n\n1. You need to set the TokenProvider in the adapter.ClientConfig, which gets used by the adapter in https://github.com/knative/eventing/blob/c28078eca2a1ec94138794f64f274d25e6292354/pkg/adapter/mtping/runner.go#L211-L217\n\n You can get the tokenProvider from `a.clientConfig.TokenProvider`.\n\n2. You need to pass/set the Audience and OIDCServiceAccountName in the env struct, which gets passed to the ClientConfig [here](https://github.com/knative/eventing/blob/c28078eca2a1ec94138794f64f274d25e6292354/pkg/adapter/mtping/runner.go#L212) too. You can get both from the source status (`source.Status.SinkAudience` and `source.Status.Auth.ServiceAccountName`). (Hint: currently, we are missing to set the sink audience correctly. This is addressed in #7553. So you might need to rebase after it merged)\n\nAnd one thing about your e2e test: You are not passing the expected audience of the sink, when you setup the PingSource in https://github.com/knative/eventing/blob/c28078eca2a1ec94138794f64f274d25e6292354/test/auth/features/oidc/pingsource.go#L38. You can pass the audience via the following: \n```\npingsource.WithSink(&duckv1.Destination{\n\tRef: service.AsKReference(sink),\n\tAudience: &sinkAudience,\n})))\n```\nAnyhow, `pingsource.WithSink()` does not set the audience of the sink yet. You can check e.g. on https://github.com/knative/eventing/blob/7f43f16", + "codeSnippets": [ + "\n\n[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/knative/eventing/pull/7525?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative). \n:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative).\n\nHello @Zazzscoot,\r\nthanks for your update. I", + "\n\n[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/knative/eventing/pull/7525?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative). \n:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative).\n\nHello @Zazzscoot,\r\nthanks for your update. It seems the e2e test failures are because you're missing the implementation for the pingsource:\r\n\r\n1. 
You need to set the TokenProvider in the adapter.ClientConfig, which gets used by the adapter in https://github.com/knative/eventing/blob/c28078eca2a1ec94138794f64f274d25e6292354/pkg/adapter/mtping/runner.go#L211-L217\r\n\r\n You can get the tokenProvider from `a.clientConfig.TokenProvider`.\r\n\r\n2. You need to pass/set the Audience and OIDCServiceAccountName in the env struct, which gets passed to the ClientConfig [here](https://github.com/knative/eventing/blob/c28078eca2a1ec94138794f64f274d25e6292354/pkg/adapter/mtping/runner.go#L212) too. You can get both from the source status (`source.Status.SinkAudience` and `source.Status.Auth.ServiceAccountName`). (Hint: currently, we are missing to set the sink audience correctly. This is addressed in #7553. So you might need to rebase after it merged)\r\n\r\nAnd one thing about your e2e test: You are not passing the expected audience of the sink, when you setup the PingSource in https://github.com/knative/eventing/blob/c28078eca2a1ec94138794f64f274d25e6292354/test/auth/features/oidc/pingsource.go#L38. You can pass the audience via the following:" + ] + } + }, + "metadata": { + "tags": [ + "knative-eventing", + "graduated", + "app-definition", + "troubleshoot", + "approved", + "size-l", + "lgtm", + "area-test-and-release", + "ok-to-test" + ], + "cncfProjects": [ + "knative-eventing" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "advanced", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/knative/eventing/pull/7525", + "repo": "https://github.com/knative/eventing" + }, + "reactions": 2, + "comments": 33, + "synthesizedBy": "regex", + "qualityScore": 66 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with knative-eventing installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:28:38.296Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/knative/knative-12715-add-support-for-topology-spread-constraint.json b/solutions/cncf-generated/knative/knative-12715-add-support-for-topology-spread-constraint.json new file mode 100644 index 00000000..122b4c79 --- /dev/null +++ b/solutions/cncf-generated/knative/knative-12715-add-support-for-topology-spread-constraint.json @@ -0,0 +1,81 @@ +{ + "version": "kc-mission-v1", + "name": "knative-12715-add-support-for-topology-spread-constraint", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "knative: Add support for topology spread constraint", + "description": "Fixes https://github.com/knative/serving/issues/12639. \nKnative serving currently does not allow specifying `topologySpreadConstraints` in the pod spec as noted by this issue \n\n**I tested this by locally building knative and applying it to a k8s cluster. Topology spread constraints were able to work after enabling them through the config-feature config map**\n\n## Proposed Changes\n\n*\n*\n*\n\n**Release Note**", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Fixes https://github.com/knative/serving/issues/12639. 
\nKnative serving currently does not allow specifying `topologySpreadConstraints` in the pod spec as noted by this issue \n\n**I tested this by loca" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nThanks for your pull request! It looks like this may be your first contribution to a Google open source project. Before we can look at your pull request, you'll need to sign a Contributor License Agreement (CLA).\n\nFor more information, open the [CLA check for this pull request](https://github.com/knative/serving/pull/12715/checks?check_run_id=5484246303).\nWelcome @stevenchen-db! It looks like this is your first PR to knative/serving 🎉\nHi @stevenchen-db. Thanks for your PR.\n\nI'm waiting for a [k\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "> What is the process to update the spec in https://github.com/knative/specs/blob/main/specs/serving/knative-api-specification-1.0.md#revisiontemplatespec ?\n\nThis currently goes through the Knative Trademark Committee.\n\n> Is there a place where such changes could be collected for a future update of the spec (like with an OPTIONAL keyword), or can optional fields be ignored in the spec for now and it is a second step to include them in the spec? (but might be good to collect them in a separate list for reference and future updates of the spec)\n\nChanges to the Knative spec apply to all distributions. Fields that are Kubernetes specific will probably never be accepted since K8s is really an implementation detail. And that's ok the OSS implementation can be a super set of the spec.\n\nBut since the OSS distribution still values a clear separation of developer/operator concerns some fields may always stay behind a feature flag (like this one). \n\n> I suggest updating https://github.com/knative/serving/blob/main/DEVELOPMENT.md?plain=1#L230-L236 in case we need to track those changed fields separately.\n\nThis field is covered by the `preserveUnknownFields` at the PodSpec level", + "codeSnippets": [ + "Thanks for your pull request! It looks like this may be your first contribution to a Google open source project. Before we can look at your pull request, you'll need to sign a Contributor License Agreement (CLA).\n\nFor more information, open the [CLA check for this pull request](https://github.com/knative/serving/pull/12715/checks?check_run_id=5484246303).\nWelcome @stevenchen-db! It looks like this is your first PR to knative/serving 🎉\nHi @stevenchen-db. Thanks for your PR.\n\nI'm waiting for a [k", + "Thanks for your pull request! It looks like this may be your first contribution to a Google open source project. Before we can look at your pull request, you'll need to sign a Contributor License Agreement (CLA).\n\nFor more information, open the [CLA check for this pull request](https://github.com/knative/serving/pull/12715/checks?check_run_id=5484246303).\nWelcome @stevenchen-db! It looks like this is your first PR to knative/serving 🎉\nHi @stevenchen-db. Thanks for your PR.\n\nI'm waiting for a [knative](https://github.com/orgs/knative/people) member to verify that this patch is reasonable to test. If it is, they should reply with `/ok-to-test` on its own line. Until that is done, I will not automatically test new commits in this PR, but the usual testing commands by org members will still work. 
Regular contributors should [join the org](https://github.com/orgs/knative/people) to skip this step.\n\nOnce the patch is verified, the new status will be reflected by the `ok-to-test` label.\n\nI understand the commands that are listed [here](https://go.k8s.io/bot-commands?repo=knative%2Fserving).\n\n
\n\n/ok-to-test\r\n\n> Since this PR is changing the PodSpec, I believe you'll need to [update the schemas](https://github.com/knative/serving/blob/main/DEVELOPMENT.md?plain=1#L230-L236) as well\r\n\r\n@psschwei I didn't see other things like node affinity or container runtime in `hack/schemapatch-config.yaml`. Is there any reason why topologySpreadConstraints need to be in this file?\n> Is there any reason why topologySpreadConstraints need to be in this file?\r\n\r\nSince you're behind a feature flag, [it doesn't](https://github.com/knative/serving/blob/main/DEVELOPMENT.md?plain=1#L235-L236). And since `preserveUnknownFields` is already set to true, doesn't look like there's anything needed on that front for this one (sorry about that, it's been a while since I looked at the schema job...)\r\n\n# [Codecov](https://codecov.io/gh/knative/serving/pull/12715?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) Report\n> Merging [#12715](https://codecov.io/gh/knative/serving/pull/12715?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) (04cebd0) into [main](https://codecov.io/gh/knative/serving/commit/0753bb1c55bd58d26b71f33db9776d78d92bff30?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=knative) (0753bb1) will **increase** coverage by `0.00%`.\n> The diff coverage is `100.00%`." + ] + } + }, + "metadata": { + "tags": [ + "knative", + "graduated", + "app-definition", + "troubleshoot", + "area-api", + "size-l", + "lgtm", + "area-networking", + "approved", + "area-test-and-release", + "ok-to-test" + ], + "cncfProjects": [ + "knative" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/knative/serving/pull/12715", + "repo": "https://github.com/knative/serving" + }, + "reactions": 3, + "comments": 14, + "synthesizedBy": "regex", + "qualityScore": 62 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with knative installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:28:17.038Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kserve/kserve-4603-feat-add-catboost-model-format-supported-by-mlserver-runtime.json b/solutions/cncf-generated/kserve/kserve-4603-feat-add-catboost-model-format-supported-by-mlserver-runtime.json new file mode 100644 index 00000000..6a0a0ec3 --- /dev/null +++ b/solutions/cncf-generated/kserve/kserve-4603-feat-add-catboost-model-format-supported-by-mlserver-runtime.json @@ -0,0 +1,85 @@ +{ + "version": "kc-mission-v1", + "name": "kserve-4603-feat-add-catboost-model-format-supported-by-mlserver-runtime", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "kserve: feat: add CatBoost model format supported by MLServer runtime", + "description": "**What this PR does / why we need it**:\n\nAdds support of `CatBoost` model format for `MlServer` runtime\n\n**Which issue(s) this PR fixes**:\n\n**Type of changes**\n\n**Feature/Issue validation/testing**:\n\nCreated `isvc` resource with provided model format and fake `storageUri`, just to verify that controller picked appropriate `ClusterServingRuntime`:\n\n```sh\nkubectl create namespace kitty\n\nkubectl -n kitty apply -f - <(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\nFixes #\n\n**Type of changes**\nPlease delete options that are not relevant.\n\n**Feature/Issue validation/testing**:\n\nPlease describe the tests that you ran to verify your changes and relevant result summary. Provide instructions so it can be reproduced.\nPlease also list any relevant details for your test configuration.\n\n- Logs\n\n**Special notes for your reviewer**:\n\n1. Please confirm that if this PR changes any image versions, then that's the sole change this PR makes.\n\n**Checklist**:\n\n**Release note**:\n\n**Re-running failed tests**\n\n- `/rerun-all` - rerun all failed workflows.\n- `/rerun-workflow ` - rerun a specific failed workflow. Only one workflow name can be specified. Multiple /rerun-workflow commands are all", + "codeSnippets": [ + "kubectl create namespace kitty\r\n\r\nkubectl -n kitty apply -f - <` - rerun a specific failed workflow. Only one workflow name can be specified. Multiple /rerun-workflow commands are allowed per comment.\r\n\n…nfigs\r\n\r\n\r\n\r\n**What this PR does / why we need it**:\r\n\r\n**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\r\nFixes #\r\n\r\n**Type of changes**\r\nPlease delete options that are not relevant.\r\n\r\n- [ ] Bug fix (non-breaking change which fixes an issue)\r\n- [ ] New feature (non-breaking change which adds functionality)\r\n- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)\r\n- [ ] This change requires a documentation update\r\n\r\n**Feature/Issue validation/testing**:\r\n\r\nPlease describe the tests that you ran to verify your changes and relevant result summary. Provide instructions so it can be reproduced.\r\nPlease also list any relevant details for your test configuration.\r\n\r\n- [ ] Test A\r\n- [ ] Test B\r\n\r\n- Logs\r\n\r\n**Special notes for your reviewer**:\r\n\r\n1. 
Please confirm that if this PR changes any image versions, then that's the sole change this PR makes.\r\n\r\n**Checklist**:\r\n\r\n- [ ] Have you added unit/e2e tests that prove your fix is effective or that this feature works?\r\n- [ ] Has code been commented, particularly in hard-to-understand areas?\r\n- [ ] Have you made corresponding changes to the documentation?\r\n\r\n**Release note**:\r\n" + ] + } + }, + "metadata": { + "tags": [ + "kserve", + "incubating", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "kserve" + ], + "targetResourceKinds": [ + "Service", + "Namespace" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/kserve/kserve/pull/4603", + "repo": "https://github.com/kserve/kserve", + "pr": "https://github.com/kserve/kserve/pull/4675" + }, + "reactions": 5, + "comments": 5, + "synthesizedBy": "regex", + "qualityScore": 66 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with kserve installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:36:55.993Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubeelasti/kubeelasti-178-helm-forward-values-to-elasti-trough-env-variables.json b/solutions/cncf-generated/kubeelasti/kubeelasti-178-helm-forward-values-to-elasti-trough-env-variables.json new file mode 100644 index 00000000..e829ad6b --- /dev/null +++ b/solutions/cncf-generated/kubeelasti/kubeelasti-178-helm-forward-values-to-elasti-trough-env-variables.json @@ -0,0 +1,77 @@ +{ + "version": "kc-mission-v1", + "name": "kubeelasti-178-helm-forward-values-to-elasti-trough-env-variables", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "kubeelasti: helm: forward values to elasti trough env variables", + "description": "This allows operator to work in any namespace, not only in `elasti`.\nThis also allows forwarding values from helm chart to elasti pods.\n\nHad to update few third parties, as UTs & e2e didn't want to work (probably due to go1.25.1 or helm 3.18.6).", + "type": "deploy", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This allows operator to work in any namespace, not only in `elasti`.\nThis also allows forwarding values from helm chart to elasti pods.\n\nHad to update few third parties, as UTs & e2e didn't want to wo" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n**Applied to files:**\n- `operator/internal/controller/opsEndpointslices.go`\n\n\n\n
\n🧬 Code graph analysis (8)\n\n\noperator/internal/controller/opsDeployment.go (1)\n\n\npkg/config/config.go (1)\n\n* `GetResolverConfig` (43-54)\n\n\n\n\n\nresolver/cmd/main.go (1)\n\n
\npkg/config/config.go (1)\n\n* `GetRe\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "## Walkthrough\nIntroduces a centralized config package to source operator and resolver settings from environment variables; updates operator, resolver, and Helm charts to consume these values; removes hard-coded names/ports; bumps controller-tools version; and adjusts CRD schemas and tests accordingly.\n\n## Changes\n| Cohort / File(s) | Summary |\n| --- | --- |\n| **Helm: common env injection**
`charts/elasti/templates/_helpers.tpl`, `charts/elasti/templates/deployment.yaml` | Adds `elasti.commonEnvValues` helper emitting operator/resolver env vars (names, services, ports, KUBERNETES_CLUSTER_DOMAIN). Injects helper into operator and resolver containers; removes explicit KUBERNETES_CLUSTER_DOMAIN envs. |\n| **Operator: adopt config package**
`operator/cmd/main.go`, `operator/internal/informer/informer.go`, `operator/internal/controller/opsDeployment.go`, `operator/internal/controller/opsEndpointslices.go`, `operator/internal/controller/opsInformer.go`, `operator/internal/controller/elastiservice_controller.go` | Imports config; replaces hard-coded resolver/operator identifiers and ports with config-driven values; updates informer namespace/name selectors; uses configured reverse proxy port in EndpointSlices; removes unused resolver constants. |\n| **Config module (new)**
`pkg/config/config.go` | Adds env-driven configuration: Config, ResolverConfig, getters for resolver/operator config and cluster domain; strict parsing with panic on missing/invalid envs; defines env var co", + "codeSnippets": [ + "**Applied to files:**\n- `operator/internal/controller/opsEndpointslices.go`\n\n
\n\n\n🧬 Code graph analysis (8)\n\n\noperator/internal/controller/opsDeployment.go (1)\n\n\npkg/config/config.go (1)\n\n* `GetResolverConfig` (43-54)\n\n\n\n\n\nresolver/cmd/main.go (1)\n\n
\npkg/config/config.go (1)\n\n* `GetRe", + "**Applied to files:**\n- `operator/internal/controller/opsEndpointslices.go`\n\n
\n\n\n🧬 Code graph analysis (8)\n\n\noperator/internal/controller/opsDeployment.go (1)\n\n\npkg/config/config.go (1)\n\n* `GetResolverConfig` (43-54)\n\n\n\n\n\nresolver/cmd/main.go (1)\n\n
\npkg/config/config.go (1)\n\n* `GetResolverConfig` (43-54)\n\n\n\n\noperator/cmd/main.go (1)\n\n\npkg/config/config.go (1)\n\n* `GetOperatorConfig` (57-64)\n\n\n\n\noperator/internal/controller/opsInformer.go (3)\n\n\npkg/config/config.go (1)\n\n* `GetResolverConfig` (43-54)\n\n\n\noperator/internal/controller/elastiservice_controller.go (1)\n\n* `ElastiServiceReconciler` (31-40)\n\n\n\noperator/internal/informer/informer.go (1)\n\n* `KeyParams` (337-342)\n\n\n\n\n\noperator/internal/controller/elastiservice_controller_test.go (1)\n\n\npkg/config/config.go (5)\n\n* `EnvResolverNamespace` (10-10)\n* `EnvResolverDeploymentName` (11-11)\n* `EnvResolverServiceName` (12-12)\n* `EnvResolverPort` (13-13)\n* `EnvResolverProxyPort` (14-14)\n\n\n\n\n\noperator/internal/informer/informer.go (1)\n\n\npkg/config/config.go (1)\n\n* `GetResolverConfig` (43-54)\n\n\n\n\n\noperator/internal/controller/opsEndpointslices.go (1)\n\n\npkg/config/config.go (1)\n\n* `GetResolverConfig` (43-54)\n\n\n\n\n\nresolver/internal/operator/RPCClient.go (1)\n\n\npkg/config/config.go (2)\n\n* `GetOperatorConfig` (57-64)\n* `GetKubernetesClusterDomain` (38-40)\n\n\n\n\n\n\n\n\n\n\n🔇 Additional comments (21)
\n\n\noperator/Makefile (1)\n\n`152-152`: **LGTM! Controller-tools version update aligns with PR scope.**\n\nThe bump from v0.14.0 to v0.19.0 is consistent with the CRD schema changes throughout the PR.\n\n\n\noperator/config/crd/bases/elasti.truefoundry.com_elastiservices.yaml (2)\n\n`6-6`: **LGTM! Controller-gen version updated consistently.**\n\nThe version bump to v0.19.0 matches the Makefile change and other CRD updates in this PR.\n\n---\n\n`99-101`: **LGTM! Required fields alignment addresses validation gap.**\n\nBased on the past review discussion, these fields were already required at the code level but not enforced in the CRD schema. This change aligns the schema with the actual implementation requirements.\n\n\n\noperator/internal/controller/elastiservice_controller_test.go (2)\n\n`25-25`: **LGTM! Test now uses centralized configuration.**\n\nThe import aligns with the PR's goal of centralizing configuration management.\n\n---\n\n`58-62`: **Add missing cluster domain environment variable.**\n\nThe test should set the cluster domain to prevent panics if reconciliation code begins reading `GetKubernetesClusterDomain()`.\n\n\nApply this diff to add the missing environment variable:",
\n
\nresolver/internal/operator/RPCClient.go (1)
\n\n`38-46`: **Make operator host FQDN-safe and set HTTP client timeout.**\n\n- Current code always appends “..svc.” which breaks if ServiceName is already FQDN.\n- http.Client has no timeout; network issues can hang indefinitely.\n\nApply this diff:" + ] + } + }, + "metadata": { + "tags": [ + "kubeelasti", + "sandbox", + "app-definition", + "deploy" + ], + "cncfProjects": [ + "kubeelasti" + ], + "targetResourceKinds": [ + "Pod", + "Service", + "Namespace" + ], + "difficulty": "intermediate", + "issueTypes": [ + "deploy" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/truefoundry/KubeElasti/pull/178", + "repo": "https://github.com/truefoundry/KubeElasti" + }, + "reactions": 0, + "comments": 12, + "synthesizedBy": "regex", + "qualityScore": 64 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with kubeelasti installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:41:07.907Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubeflow/kubeflow-5237-add-support-for-tolerations-and-affinity-in-notebooks.json b/solutions/cncf-generated/kubeflow/kubeflow-5237-add-support-for-tolerations-and-affinity-in-notebooks.json new file mode 100644 index 00000000..1ab8c5ee --- /dev/null +++ b/solutions/cncf-generated/kubeflow/kubeflow-5237-add-support-for-tolerations-and-affinity-in-notebooks.json @@ -0,0 +1,87 @@ +{ + "version": "kc-mission-v1", + "name": "kubeflow-5237-add-support-for-tolerations-and-affinity-in-notebooks", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "kubeflow: Add support for Tolerations and Affinity in Notebooks", + "description": "This PR adds support for toleration and affinity configs in the Notebook Spawner UI.\n\nResolves: #4433\n\nOptions presented to the user are specified inside `spawner_ui_config.yaml`. This example config allows users to ask for exclusive access to a node within node-pool called `notebook-n1-standard-2`:\n```yaml\nspawnerFormDefaults:\n ...\n affinityConfig:\n # The default `configKey` from the options list\n # If readonly, the default value will be the only option\n value: \"none\"\n # The list ", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This PR adds support for toleration and affinity configs in the Notebook Spawner UI.\n\nResolves: #4433\n\nOptions presented to the user are specified inside `spawner_ui_config.yaml`. This example config " + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nspawnerFormDefaults:\r\n ...\r\n affinityConfig:\r\n # The default `configKey` from the options list\r\n # If readonly, the default value will be the only option\r\n value: \"none\"\r\n # The list of available affinity configs\r\n options:\r\n - configKey: \"none\"\r\n displayName: \"None\"\r\n affinity: {}\r\n # (DESC) Pod gets an exclusive \"n1-standard-2\" Node\r\n # (TIP) set PreferNoSchedule taint on this node-pool\r\n # (TIP) enable cluster-autoscaler on this node-pool\r\n \n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kubeflow/kubeflow/pull/5118. Review the changes to understand the solution." 
+ }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Fix #4842 - issue was closed earlier, but not solved within Kubeflow repo by author\n\nSets CPU and memory limits of Notebooks equal to requests. This prevents\n- out of memory problems due to over-provisioning\n- users using more resources than they should", + "codeSnippets": [ + "spawnerFormDefaults:\r\n ...\r\n affinityConfig:\r\n # The default `configKey` from the options list\r\n # If readonly, the default value will be the only option\r\n value: \"none\"\r\n # The list of available affinity configs\r\n options:\r\n - configKey: \"none\"\r\n displayName: \"None\"\r\n affinity: {}\r\n # (DESC) Pod gets an exclusive \"n1-standard-2\" Node\r\n # (TIP) set PreferNoSchedule taint on this node-pool\r\n # (TIP) enable cluster-autoscaler on this node-pool", + "spawnerFormDefaults:\r\n ...\r\n affinityConfig:\r\n # The default `configKey` from the options list\r\n # If readonly, the default value will be the only option\r\n value: \"none\"\r\n # The list of available affinity configs\r\n options:\r\n - configKey: \"none\"\r\n displayName: \"None\"\r\n affinity: {}\r\n # (DESC) Pod gets an exclusive \"n1-standard-2\" Node\r\n # (TIP) set PreferNoSchedule taint on this node-pool\r\n # (TIP) enable cluster-autoscaler on this node-pool\r\n # (TIP) dont let users request more CPU/MEMORY than the size of this node\r\n - configKey: \"exclusive__n1-standard-2\"\r\n displayName: \"Exclusive: n1-standard-2\"\r\n affinity:\r\n # (Require) Node having label: `node_pool=notebook-n1-standard-2`\r\n nodeAffinity:\r\n requiredDuringSchedulingIgnoredDuringExecution:\r\n nodeSelectorTerms:\r\n - matchExpressions:\r\n - key: \"node_pool\"\r\n operator: \"In\"\r\n values:\r\n - \"notebook-n1-standard-2\"\r\n # (Require) Node WITHOUT existing Pod having label: `notebook-name`\r\n podAntiAffinity:\r\n requiredDuringSchedulingIgnoredDuringExecution:\r\n - labelSelector:\r\n matchExpressions:\r\n - key: \"notebook-name\"\r\n operator: \"Exists\"\r\n namespaces: []\r\n topologyKey: \"kubernetes.io/hostname\"\r\n readOnly: false\r\n tolerationGroup:\r\n # The default `groupKey` from the options list\r\n # If readonly, the default value will be the only option\r\n value: \"none\"\r\n # The list of available tolerationGroup configs\r\n options:\r\n - groupKey: \"none\"\r\n displayName: \"None\"\r\n tolerations: []\r\n - groupKey: \"group_1\"\r\n displayName: \"Group 1: description\"\r\n tolerations:\r\n - key: \"key1\"\r\n operator: \"Equal\"\r\n value: \"value1\"\r\n effect: \"NoSchedule\"\r\n - key: \"key2\"\r\n operator: \"Equal\"\r\n value: \"value2\"\r\n effect: \"NoSchedule\"\r\n readOnly: false", + "apiVersion: kustomize.config.k8s.io/v1beta1\r\nkind: Kustomization\r\nresources:\r\n - XXXXX\r\n# ----------------\r\n# ↓ our changes ↓\r\n# ----------------\r\nimages:\r\n - name: gcr.io/kubeflow-images-public/jupyter-web-app\r\n newName: gcr.io/kubeflow-images-public/jupyter-web-app\r\n newTag: vmaster-ge4456300" + ] + } + }, + "metadata": { + "tags": [ + "kubeflow", + "incubating", + "app-definition", + "troubleshoot", + "size-l", + "lgtm", + "approved", + "cla--yes", + "ok-to-test" + ], + "cncfProjects": [ + "kubeflow" + ], + "targetResourceKinds": [ + "Pod", + "Namespace", + "Node" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": 
"https://github.com/kubeflow/kubeflow/pull/5237", + "repo": "https://github.com/kubeflow/kubeflow", + "pr": "https://github.com/kubeflow/kubeflow/pull/5118" + }, + "reactions": 7, + "comments": 18, + "synthesizedBy": "regex", + "qualityScore": 70 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with kubeflow installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:37:22.610Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubeflow/kubeflow-6374-support-k8s-1-22-in-notebook-controller.json b/solutions/cncf-generated/kubeflow/kubeflow-6374-support-k8s-1-22-in-notebook-controller.json new file mode 100644 index 00000000..df8778a6 --- /dev/null +++ b/solutions/cncf-generated/kubeflow/kubeflow-6374-support-k8s-1-22-in-notebook-controller.json @@ -0,0 +1,88 @@ +{ + "version": "kc-mission-v1", + "name": "kubeflow-6374-support-k8s-1-22-in-notebook-controller", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "kubeflow: Support K8s 1.22 in notebook controller", + "description": "Fix https://github.com/kubeflow/kubeflow/issues/6366\n\nMigrating to Kubebuilder v3 leads to the following changes:\n- Add .dockerignore file.\n- Upgrade Go version from v1.15 to v1.17.\n- Adapt Makefile.\n- Upgrade EnvTest to use K8s v1.22.\n- Update PROJECT template.\n- Migrate CRD apiVersion from v1beta to v1.\n- Add livenessProbe and readinessProbe to controller manager.\n- Upgrade controller-runtime from v0.2.0 to v0.11.0.\n\nOther changes:\n- Build image using public.ecr.aws registry instead of gcr.io.", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Fix https://github.com/kubeflow/kubeflow/issues/6366\n\nMigrating to Kubebuilder v3 leads to the following changes:\n- Add .dockerignore file.\n- Upgrade Go version from v1.15 to v1.17.\n- Adapt Makefile.\n" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nmake install\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/opendatahub-io/odh-manifests/pull/538. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "This PR includes the following changes:\n\n- Generate `KfDef` compatible manifests for the [Kubeflow Notebook Controller](https://github.com/opendatahub-io/kubeflow/tree/master/components/notebook-controller).\n- Generate `KfDef` compatible manifests for the [Openshift Notebook Controller](https://github.com/opendatahub-io/kubeflow/tree/master/components/odh-notebook-controller).\n- Add notebook-controller.sh tests.\n- New Grafana dashboard with controller metrics to measure SLIs.\n- Prometheus mock exporter with fake notebook controller metrics.\n- Add minimal and generic notebooks example.\n\nIf you want to try out this PR you can use the following KfDef:\n\n```yaml\n---\napiVersion: kfdef.apps.kubeflow.org/v1\nkind: KfDef\nmetadata:\n name: notebook-controller\n namespace: opendatahub\nspec:\n applications:\n - name: odh-common\n kustomizeConfig:\n repoRef:\n name: manifests\n path: odh-common\n - kustomizeConfig:\n repoRef:\n name: manifests\n path: grafana/cluster\n name: grafana-cluster\n - kustomizeConfig:\n repoRef:\n name: manifests\n path: grafana/grafana\n name: grafana-instance\n - kustomizeConfig:\n repoRef:\n name: manifests\n path: prometheus/cluster\n name: prometheus-cluster\n - kustomizeConfig:\n repoRef:\n name: manifests\n path: prometheus/operator\n name: prometheus-operator\n - kustomizeConfig:\n repoRef:\n name: man", + "codeSnippets": [ + "make install", + "make install", + "make deploy", + "$ kubectl get pods -l app=notebook-controller -n notebook-controller-system\r\nNAME READY STATUS RESTARTS AGE\r\nnotebook-controller-deployment-564d76877-mqsm8 1/1 Running 0 16s", + "---\r\napiVersion: kfdef.apps.kubeflow.org/v1\r\nkind: KfDef\r\nmetadata:\r\n name: notebook-controller\r\n namespace: opendatahub\r\nspec:\r\n applications:\r\n - name: odh-common\r\n kustomizeConfig:\r\n repoRef:\r\n name: manifests\r\n path: odh-common\r\n - kustomizeConfig:\r\n repoRef:\r\n name: manifests\r\n path: grafana/cluster\r\n name: grafana-cluster\r\n - kustomizeConfig:\r\n repoRef:\r\n name: manifests\r\n path: grafana/grafana\r\n name: grafana-instance\r\n - kustomizeConfig:\r\n repoRef:\r\n name: manifests\r\n path: prometheus/cluster\r\n name: prometheus-cluster\r\n - kustomizeConfig:\r\n repoRef:\r\n name: manifests\r\n path: prometheus/operator\r\n name: prometheus-operator\r\n - kustomizeConfig:\r\n repoRef:\r\n name: manifests\r\n path: notebook-controller\r\n name: notebook-controller\r\n repos:\r\n - name: manifests\r\n uri: https://github.com/samuelvl/odh-manifests/tarball/notebook-controller\r\n version: master" + ] + } + }, + "metadata": { + "tags": [ + "kubeflow", + "incubating", + "app-definition", + "troubleshoot", + "size-xxl", + "lgtm", + "approved", + "ok-to-test" + ], + "cncfProjects": [ + "kubeflow" + ], + "targetResourceKinds": [ + "Pod", + "Deployment", + "Namespace" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/kubeflow/kubeflow/pull/6374", + "repo": "https://github.com/kubeflow/kubeflow", + "pr": "https://github.com/opendatahub-io/odh-manifests/pull/538" + }, + "reactions": 6, + "comments": 8, + "synthesizedBy": "regex", + "qualityScore": 68 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with kubeflow installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:37:25.156Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubeflow/kubeflow-7622-fix-notebook-server-images-with-non-root-securitycontext.json b/solutions/cncf-generated/kubeflow/kubeflow-7622-fix-notebook-server-images-with-non-root-securitycontext.json new file mode 100644 index 00000000..a6612911 --- /dev/null +++ b/solutions/cncf-generated/kubeflow/kubeflow-7622-fix-notebook-server-images-with-non-root-securitycontext.json @@ -0,0 +1,86 @@ +{ + "version": "kc-mission-v1", + "name": "kubeflow-7622-fix-notebook-server-images-with-non-root-securitycontext", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "kubeflow: fix: notebook server images with non-root SecurityContext", + "description": "resolves https://github.com/kubeflow/kubeflow/issues/5808\n\n# What does this PR do?\n\nThis PR makes the following changes to the `example-notebook-servers`:\n\n- updates the version of [`s6-overlay`](https://github.com/just-containers/s6-overlay) to [`v3.2.0.0`](https://github.com/just-containers/s6-overlay/releases/tag/v3.2.0.0)\n- changes the primary GID of the `jovyan` user from `100` to `0`: \n - for backwards-compatibility, `jovyan` is still a member of `100`\n- fixed the fact that the IDEs cou", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "resolves https://github.com/kubeflow/kubeflow/issues/5808\n\n# What does this PR do?\n\nThis PR makes the following changes to the `example-notebook-servers`:\n\n- updates the version of [`s6-overlay`](http" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nWhat I mean by a \"typical\" container `securityContext` is one that drops all permissions prevents privilege escalation:\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "> I bet your OpenShift notebook controller has not been configured to use fsGroup (because of the ADD_FSGROUP environment variable probably being set to false on your notebook-controller).\n\nIt's set to `false`. 
And when I enable it, pod does not start\n\n> 2024-07-01T12:46:15Z\tDEBUG\tevents\tReissued from statefulset/vscodekf7622: create Pod vscodekf7622-0 in StatefulSet vscodekf7622 failed error: pods \"vscodekf7622-0\" is forbidden: unable to validate against any security context constraint: [provider \"anyuid\": Forbidden: not usable by user or serviceaccount, provider \"pipelines-scc\": Forbidden: not usable by user or serviceaccount, provider \"run-as-ray-user\": Forbidden: not usable by user or serviceaccount, provider restricted-v2: .spec.securityContext.fsGroup: Invalid value: []int64{100}: 100 is not an allowed group, provider \"restricted\": Forbidden: not usable by user or serviceaccount, provider \"nonroot-v2\": Forbidden: not usable by user or serviceaccount, provider \"nonroot\": Forbidden: not usable by user or serviceaccount, provider \"hostmount-anyuid\": Forbidden: not usable by user or serviceaccount, provider \"machine-api-termination-handler\": Forbidden: not usable by user or serviceaccount, provider \"hostnetwork-v2\": Forbidden: not usable by user or serviceaccount, provider \"hostnetwork\": Forbidden: not usable by user or serviceaccount, provider \"hostaccess\": Forbidden: not usable by user or serviceaccount, provider \"hostpath-provisioner\": Forbidden: not usable by user or se", + "codeSnippets": [ + "What I mean by a \"typical\" container `securityContext` is one that drops all permissions prevents privilege escalation:", + "What I mean by a \"typical\" container `securityContext` is one that drops all permissions prevents privilege escalation:", + "For example, here is a Notebook which has this error that uses the `kubeflownotebookswg/jupyter-scipy:v1.9.0-rc.1` image:", + "[restricted-v2.yaml.txt](https://github.com/user-attachments/files/16053326/restricted-v2.yaml.txt)", + "So the allowed groups seem to be in the range 1001010000 - 1001019999, or something like that!\r\n\r\n> Note: The range 1000000000/10000 means 10,000 values starting with ID 1000000000, so it specifies the range of IDs from 1000000000 to 1000009999. https://developer.ibm.com/learningpaths/secure-context-constraints-openshift/deployment-specify-permissions/\n/lgtm for the general technical approach, but I did not check all details.\nIn any case, here's how my mounts look like (with `ADD_FSGROP`=false)" + ] + } + }, + "metadata": { + "tags": [ + "kubeflow", + "incubating", + "app-definition", + "troubleshoot", + "size-l", + "lgtm", + "approved" + ], + "cncfProjects": [ + "kubeflow" + ], + "targetResourceKinds": [ + "Pod", + "Deployment", + "Service", + "Namespace", + "Persistentvolumeclaim", + "Persistentvolume", + "Serviceaccount" + ], + "difficulty": "advanced", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/kubeflow/kubeflow/pull/7622", + "repo": "https://github.com/kubeflow/kubeflow" + }, + "reactions": 2, + "comments": 18, + "synthesizedBy": "regex", + "qualityScore": 62 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with kubeflow installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:37:28.039Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-102819-add-o-extra-columns-format-option-to-kubectl-get.json b/solutions/cncf-generated/kubernetes/kubernetes-102819-add-o-extra-columns-format-option-to-kubectl-get.json new file mode 100644 index 00000000..e2f92396 --- /dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-102819-add-o-extra-columns-format-option-to-kubectl-get.json @@ -0,0 +1,87 @@ +{ + "version": "kc-mission-v1", + "name": "kubernetes-102819-add-o-extra-columns-format-option-to-kubectl-get", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "kubernetes: Add `-o extra-columns` format option to kubectl get", + "description": "Fixes https://github.com/kubernetes/kubernetes/issues/98368\n\nNote to reviewers: This PR is in draft while I get more feedback on if this desired and/or find more time to clean it up. Before merge, I would expect to clean up the logic a bit (notable - remove lazy error handling panics), and add tests.\n\nExamples:\n```\n$ ~/go/bin/kubectl get pod -o \"extra-columns=NAME:.metadata.name,IP:.status.podIP,IMAGE:.spec.containers[*].image\"\nNAME AGE NAME IP ", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Fixes https://github.com/kubernetes/kubernetes/issues/98368\n\nNote to reviewers: This PR is in draft while I get more feedback on if this desired and/or find more time to clean it up. Before merge, I w" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n$ ~/go/bin/kubectl get pod -o \"extra-columns=NAME:.metadata.name,IP:.status.podIP,IMAGE:.spec.containers[*].image\"\r\nNAME AGE NAME IP IMAGE\r\nnoproxy-688f47dc9-vxpbw 74m noproxy-688f47dc9-vxpbw 10.244.0.106 howardjohn/alpine-shell\r\nshell-7854df9c5-dc6ml 74m shell-7854df9c5-dc6ml 10.244.0.105 howardjohn/alpine-shell,gcr.io/istio-testing/proxyv2:latest\r\n$ ~/go/bin/kubectl get svc -o \"extra-columns=NAME:.spec.ports[*].name,PO\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "Sorry for the delay!\n\nThis makes sense to me and has community want.\n\nIt seems there are columns missing from the original output though?\n\n```\n _output/bin/kubectl get pods -n metallb-system\nNAME READY STATUS RESTARTS AGE\ncontroller-6b78bff7d9-zfhqt 1/1 Running 3 20d\nspeaker-5r766 1/1 Running 3 20d\nspeaker-c4xbg 1/1 Running 2 20d\nspeaker-x4ft8 1/1 Running 2 20d\nspeaker-ggngz 1/1 Running 3 20d\n\n_output/bin/kubectl get pods -o \"extra-columns=IMAGES:spec.containers[*].image\" -n metallb-system\nNAME AGE IMAGES\ncontroller-6b78bff7d9-zfhqt 20d quay.io/metallb/controller:v0.10.2\nspeaker-5r766 20d quay.io/metallb/speaker:v0.10.2\nspeaker-c4xbg 20d quay.io/metallb/speaker:v0.10.2\nspeaker-x4ft8 20d quay.io/metallb/speaker:v0.10.2\nspeaker-ggngz 20d quay.io/metallb/speaker:v0.10.2\n```", + "codeSnippets": [ + "$ ~/go/bin/kubectl get pod -o \"extra-columns=NAME:.metadata.name,IP:.status.podIP,IMAGE:.spec.containers[*].image\"\r\nNAME AGE NAME IP IMAGE\r\nnoproxy-688f47dc9-vxpbw 74m noproxy-688f47dc9-vxpbw 10.244.0.106 howardjohn/alpine-shell\r\nshell-7854df9c5-dc6ml 74m shell-7854df9c5-dc6ml 10.244.0.105 howardjohn/alpine-shell,gcr.io/istio-testing/proxyv2:latest\r\n$ ~/go/bin/kubectl get svc -o \"extra-columns=NAME:.spec.ports[*].name,PO", + "$ ~/go/bin/kubectl get pod -o \"extra-columns=NAME:.metadata.name,IP:.status.podIP,IMAGE:.spec.containers[*].image\"\r\nNAME AGE NAME IP IMAGE\r\nnoproxy-688f47dc9-vxpbw 74m noproxy-688f47dc9-vxpbw 10.244.0.106 howardjohn/alpine-shell\r\nshell-7854df9c5-dc6ml 74m shell-7854df9c5-dc6ml 10.244.0.105 howardjohn/alpine-shell,gcr.io/istio-testing/proxyv2:latest\r\n$ ~/go/bin/kubectl get svc -o \"extra-columns=NAME:.spec.ports[*].name,PORT:.spec.ports[*].port,TARGET:.spec.ports[*].targetPort\"\r\nNAME AGE NAME PORT TARGET\r\nawake 6h13m http 80 80\r\nkubernetes 6h13m https 443 6443\r\nshell 75m http 9087 9087\r\nsleep 6h13m http 80 80", + "#### Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.:", + "@howardjohn: This issue is currently awaiting triage.\n\nIf a SIG or subproject determines this is a relevant issue, they will accept it by applying the `triage/accepted` label and provide further guidance.\n\nThe `triage/accepted` label can be added by org members by writing `/triage accepted` in a comment.\n\n\n
\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n
\nHi @howardjohn. Thanks for your PR.\n\nI'm waiting for a [kubernetes](https://github.com/orgs/kubernetes/people) member to verify that this patch is reasonable to test. If it is, they should reply with `/ok-to-test` on its own line. Until that is done, I will not automatically test new commits in this PR, but the usual testing commands by org members will still work. Regular contributors should [join the org](https://git.k8s.io/community/community-membership.md#member) to skip this step.\n\nOnce the patch is verified, the new status will be reflected by the `ok-to-test` label.\n\nI understand the commands that are listed [here](https://go.k8s.io/bot-commands?repo=kubernetes%2Fkubernetes).\n\n
\n\nInstructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository.\n
\n\n[APPROVALNOTIFIER] This PR is **NOT APPROVED**\n\nThis pull-request has been approved by: *howardjohn*\nTo complete the [pull request process](https://git.k8s.io/community/contributors/guide/owners.md#the-code-review-process), please assign **deads2k** after the PR has been reviewed.\nYou can assign the PR to them by writing `/assign @deads2k` in a comment when ready.\n\nThe full list of commands accepted by this bot can be found [here](https://go.k8s.io/bot-commands?repo=kubernetes%2Fkubernetes).\n\n
\nNeeds approval from an approver in each of these files:\n\n- **[staging/src/k8s.io/cli-runtime/OWNERS](https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/cli-runtime/OWNERS)**\n- **[staging/src/k8s.io/kubectl/OWNERS](https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubectl/OWNERS)**\n\nApprovers can indicate their approval by writing `/approve` in a comment\nApprovers can cancel approval by writing `/approve cancel` in a comment\n
\n\n/ok-to-test\r\n/assign @eddiezane @soltysh \r\n\n/test pull-kubernetes-integration\r\nfor a flake. See #103472\r\n\nAny feedback on this? It has been open for 6 weeks without comments :slightly_smiling_face: \nAny feedback on this?\nSorry for the delay!\r\n\r\nThis makes sense to me and has community want.\r\n\r\nIt seems there are columns missing from the original output though?", + "> It seems there are columns missing from the original output though?\r\n\r\nOof.. not sure how I got this far without noticing :facepalm: \r\n\r\nLooks like the issue is we disable server-print when we do this, which is responsible for those. We can enable server-print, but then we get a PartialObjectMetadata object, so we cannot select on the spec/status like we want.\r\n\r\nIt seems the options are reasonably:\r\n* Extend server-print to do this on the server side. Seems complex.\r\n* Find a way to get the full object even when doing server-print. This looks complex since the api-server doesn't actually return this\nThis would be a great feature addition for kubectl. I hope the PR hasn't got too stale and you can figure something out to get the full resource output.\n@howardjohn will you be revisiting this PR?\nI would love to but I think it would require changes to the api-server, not just kubectl, which expands the scope *a lot*\nFor what it worth i hacked together a Ruby script which executes `kubectl` twice, once to get the normal output and once to get the extra columns, and then stitches the two output together: https://gist.github.com/sullerandras/8e534db550119b5d2372d7db11943e2b\r\nIt's not pretty but it does work for me.\nAn example one-liner workaround:" + ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "troubleshoot", + "area-kubectl", + "release-note", + "needs-rebase", + "size-l", + "kind-feature", + "sig-cli", + "cncf-cla--yes", + "ok-to-test", + "needs-priority", + "needs-triage" + ], + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/kubernetes/kubernetes/pull/102819", + "repo": "https://github.com/kubernetes/kubernetes" + }, + "reactions": 37, + "comments": 22, + "synthesizedBy": "regex", + "qualityScore": 70 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with kubernetes installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:34:59.520Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-42873-add-kubectl-api-resources-command.json b/solutions/cncf-generated/kubernetes/kubernetes-42873-add-kubectl-api-resources-command.json new file mode 100644 index 00000000..e8500acf --- /dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-42873-add-kubectl-api-resources-command.json @@ -0,0 +1,90 @@ +{ + "version": "kc-mission-v1", + "name": "kubernetes-42873-add-kubectl-api-resources-command", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "kubernetes: add kubectl api-resources command", + "description": "**What this PR does / why we need it**:\nAs the RBAC role need to be related to resources. I think we can use the command to get the supported resources. 
\n\n```\n# ./cluster/kubectl.sh api-resources \nNAME SHORTNAMES APIGROUP NAMESPACED KIND\nbindings true Binding\ncomponentstatuses cs false Com", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "**What this PR does / why we need it**:\nAs the RBAC role need to be related to resources. I think we can use the command to get the supported resources. \n\n```\n# ./cluster/kubectl.sh api-resources \nN" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nHi @xilabao. Thanks for your PR.\n\nI'm waiting for a [kubernetes](https://github.com/orgs/kubernetes/people) member to verify that this patch is reasonable to test. If it is, they should reply with `@k8s-bot ok to test` on its own line. Until that is done, I will not automatically test new commits in this PR, but the usual testing commands by org members will still work. Regular contributors should join the org to skip this step.\n\n
\n\nInstructions for interacting with me using PR comments \n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "add three flags. @fabianofranz @adohe @liggitt @janetkuo PTAL\n```\n# ./cluster/kubectl.sh api-resources -h\nPrint the supported API resources on the server\n\nExamples:\n # Print the supported API Resources\n kubectl api-resources\n\nOptions:\n --api-group='': The API group to use when talking to the server.\n --namespaced=true: Namespaced indicates if a resource is namespaced or not.\n --no-headers=false: When using the default or custom-column output format, don't print headers (default print\nheaders).\n\nUsage:\n kubectl api-resources [options]\n\nUse \"kubectl options\" for a list of global command-line options (applies to all commands).\n```", + "codeSnippets": [ + "Hi @xilabao. Thanks for your PR.\n\nI'm waiting for a [kubernetes](https://github.com/orgs/kubernetes/people) member to verify that this patch is reasonable to test. If it is, they should reply with `@k8s-bot ok to test` on its own line. Until that is done, I will not automatically test new commits in this PR, but the usual testing commands by org members will still work. Regular contributors should join the org to skip this step.\n\n
\n\nInstructions for interacting with me using PR comments", + "Hi @xilabao. Thanks for your PR.\n\nI'm waiting for a [kubernetes](https://github.com/orgs/kubernetes/people) member to verify that this patch is reasonable to test. If it is, they should reply with `@k8s-bot ok to test` on its own line. Until that is done, I will not automatically test new commits in this PR, but the usual testing commands by org members will still work. Regular contributors should join the org to skip this step.\n\n
\n\nInstructions for interacting with me using PR comments are available [here](https://github.com/kubernetes/community/blob/master/contributors/devel/pull-request-commands.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%20issue:) repository. I understand the commands that are listed [here](https://github.com/kubernetes/test-infra/blob/master/prow/commands.md).\n
\n\n\n\n\nThis change is [\"Reviewable\"/](https://reviewable.kubernetes.io/reviews/kubernetes/kubernetes/42873)\n\n\ncc @kubernetes/sig-cli-feature-requests @kubernetes/sig-cli-pr-reviews @deads2k @smarterclayton \r\n\r\n@xilabao is there a proposal or issue related to this?\n> @xilabao is there a proposal or issue related to this?\r\n\r\nI have just added. https://github.com/kubernetes/kubernetes/issues/42932\nyou would also need to know the API group\r\n\r\nshowing a resource that allows no verbs is debatable\nPlease also take aggregated api-servers into consideration @xilabao \nIf you use discovery data, aggregated servers and TPR data will automatically work\n@liggitt I will updated it. But I have a question. which should we add, api servers or api group? \r\nanother question @shiywang mentioned. should we also support other format of output like json, yaml ?\nAPI group\nI would not expect someone to use this as an API… if they want that, they should use the actual discovery API", + "This is exactly what I expect, we should figure out the real use case of this command.\n@all PTAL\n@fabianofranz @liggitt @AdoHe @shiywang PTAL\n/assign @adohe\nping @fabianofranz @adohe \n@k8s-bot ok to test\n@k8s-bot verify test this\r\n@k8s-bot cvm gce e2e test this\nNeeds tests for test-cmd\nfixed. @fabianofranz @adohe \nping @fabianofranz @adohe\n/lgtm\n/approve\n@k8s-bot bazel test this\r\n@k8s-bot gce etcd3 e2e test this\r\n@k8s-bot kubemark e2e test this\r\n@k8s-bot unit test this\r\n@k8s-bot kops aws e2e test this\r\n@k8s-bot verify test this\n@fabianofranz @adohe \n/release-note\nadd three flags. @fabianofranz @adohe @liggitt @janetkuo PTAL" + ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "troubleshoot", + "lgtm", + "release-note", + "size-l", + "kind-feature", + "approved", + "sig-cli", + "cncf-cla--yes", + "priority-important-longterm" + ], + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [ + "Pod", + "Deployment", + "Service", + "Ingress", + "Configmap", + "Secret", + "Statefulset", + "Daemonset" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/kubernetes/kubernetes/pull/42873", + "repo": "https://github.com/kubernetes/kubernetes" + }, + "reactions": 24, + "comments": 60, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with kubernetes installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:35:06.945Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-75831-support-mixed-protocol-lbs.json b/solutions/cncf-generated/kubernetes/kubernetes-75831-support-mixed-protocol-lbs.json new file mode 100644 index 00000000..5169dbf4 --- /dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-75831-support-mixed-protocol-lbs.json @@ -0,0 +1,90 @@ +{ + "version": "kc-mission-v1", + "name": "kubernetes-75831-support-mixed-protocol-lbs", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "kubernetes: Support mixed protocol LBs", + "description": "**What type of PR is this?**\n\n**What this PR does / why we need it**:\n\nThis PR adds support for configuring a service of type LoadBalancer with more than 1 protocol. For example, this allows configuring a LoadBalancer with both TCP and UDP port 53 (for a DNS server), or TCP and UDP 443 for HTTPS + QUIC (HTTP 3.0).\n\n**Which issue(s) this PR fixes**:\n\n**Special notes for your reviewer**:\n\nMixed protocol LBs are supported by Azure and MetalLB. Other providers MAY support this, however, I'm unable t", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "**What type of PR is this?**\n\n**What this PR does / why we need it**:\n\nThis PR adds support for configuring a service of type LoadBalancer with more than 1 protocol. For example, this allows configuri" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n\r\n\r\n**What type of PR is this?**\r\n/kind bug\r\n\r\n**What this PR does / why we need it**:\r\nfix mixed protocol issue for azure load balancer, with below config (`service.beta.kubernetes.io/azure-load-balancer-mixed-protocols: \"true\"`), azure provider will create both TCP and UDP rules for the service.", + "With this PR, you could see below both TCP and UDP rules are created for the service:\r\n![image](https://user-images.githubusercontent.com/4178417/52937705-da2eef80-339a-11e9-8d4f-9578c8cb1d0f.png)\r\n\r\n**Which issue(s) this PR fixes**:\r\n\r\nFixes #73849\r\n\r\n**Special notes for your reviewer**:\r\nOriginal PR(https://github.com/kubernetes/kubernetes/pull/67986) is not completed, I have no idea why I submitted a non-completed PR at that time...\r\n\r\n**Does this PR introduce a user-facing change?**:\r\n" + ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "troubleshoot", + "area-cloudprovider", + "release-note", + "needs-rebase", + "size-l", + "kind-feature", + "sig-apps", + "cncf-cla--yes", + "sig-cloud-provider", + "ok-to-test", + "needs-priority" + ], + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/kubernetes/kubernetes/pull/75831", + "repo": "https://github.com/kubernetes/kubernetes", + "pr": "https://github.com/kubernetes/kubernetes/pull/74200" + }, + "reactions": 43, + "comments": 33, + "synthesizedBy": "regex", + "qualityScore": 64 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with kubernetes installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:34:57.335Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-96087-move-all-bash-custom-completions-to-go.json b/solutions/cncf-generated/kubernetes/kubernetes-96087-move-all-bash-custom-completions-to-go.json new file mode 100644 index 00000000..08cb48fc --- /dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-96087-move-all-bash-custom-completions-to-go.json @@ -0,0 +1,110 @@ +{ + "version": "kc-mission-v1", + "name": "kubernetes-96087-move-all-bash-custom-completions-to-go", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "kubernetes: Move all bash custom completions to Go", + "description": "**What type of PR is this?**\n\n**What this PR does / why we need it**:\n\nBased on #93714, this PR finishes removing all bash completion scripting and replaces it fully with Go completions.\n\n*Advantages:*\n* easier maintenance of custom completions\n* ability to write Go tests for custom completions\n* allow to eventually move to native zsh completion\n* allow Fish shell completion PR (#92989) to fully support all of kubectl's custom completions\n* removes lack of portability of bash scripts\n* will allo", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "**What type of PR is this?**\n\n**What this PR does / why we need it**:\n\nBased on #93714, this PR finishes removing all bash completion scripting and replaces it fully with Go completions.\n\n*Advantages:" + }, + { + "title": "build kubectl from source", + "description": "build kubectl from source" + }, + { + "title": "generate completion code", + "description": "generate completion code" + }, + { + "title": "play with it", + "description": "play with it" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nmake WHAT=cmd/kubectl\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kubernetes/kubernetes/pull/93714. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "**What type of PR is this?**\n\n**What this PR does / why we need it**:\n\nImplement custom completions using Go for flag `--context`, `--cluster`, `--user` and `--namespace`.\n\n**Which issue(s) this PR fixes**:\n\nPart of https://github.com/kubernetes/kubectl/issues/882\n\n**Special notes for your reviewer**:\n\n### Test the new completion code\n\n1. build kubectl from source\n```sh\nmake WHAT=cmd/kubectl\n```\n\n2. generate completion code\n```sh\n# zsh\nsource <(./_output/bin/kubectl completion zsh)\n\n# bash\nsource <(./_output/bin/kubectl completion bash)\n```\n\n3. 
play with it\n\nPlease note that the new completion code requires the new kubectl.\n\n```\n./_output/bin/kubectl get --context [TAB]\n\n# debug\n./_output/bin/kubectl __complete get --context ''\n```\n\n**Does this PR introduce a user-facing change?**:\n\n**Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.**:\n\n```docs\n\n```", + "codeSnippets": [ + "make WHAT=cmd/kubectl", + "make WHAT=cmd/kubectl", + "# zsh\r\nsource <(./_output/bin/kubectl completion zsh)\r\n\r\n# bash\r\nsource <(./_output/bin/kubectl completion bash)", + "./_output/bin/kubectl get [TAB]\r\n# debug\r\n./_output/bin/kubectl __complete get \"\"\r\n\r\n./_output/bin/kubectl describe -n kube-system pod [TAB]", + "**What type of PR is this?**\r\n\r\n/kind feature\r\n\r\n**What this PR does / why we need it**:\r\n\r\nImplement custom completions using Go for flag `--context`, `--cluster`, `--user` and `--namespace`.\r\n\r\n**Which issue(s) this PR fixes**:\r\n\r\nPart of https://github.com/kubernetes/kubectl/issues/882\r\n\r\n**Special notes for your reviewer**:\r\n\r\n### Test the new completion code\r\n\r\n1. build kubectl from source" + ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "troubleshoot", + "area-kubectl", + "lgtm", + "release-note", + "size-xl", + "kind-feature", + "approved", + "sig-cli", + "cncf-cla--yes", + "ok-to-test", + "needs-priority" + ], + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [ + "Pod", + "Deployment", + "Namespace", + "Clusterrole", + "Clusterrolebinding", + "Role", + "Rolebinding" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/kubernetes/kubernetes/pull/96087", + "repo": "https://github.com/kubernetes/kubernetes", + "pr": "https://github.com/kubernetes/kubernetes/pull/93714" + }, + "reactions": 21, + "comments": 34, + "synthesizedBy": "regex", + "qualityScore": 67 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with kubernetes installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:35:10.863Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubernetes/kubernetes-97743-support-m1-macbooks-darwin-arm64-on-the-client-side.json b/solutions/cncf-generated/kubernetes/kubernetes-97743-support-m1-macbooks-darwin-arm64-on-the-client-side.json new file mode 100644 index 00000000..ff4f2a82 --- /dev/null +++ b/solutions/cncf-generated/kubernetes/kubernetes-97743-support-m1-macbooks-darwin-arm64-on-the-client-side.json @@ -0,0 +1,90 @@ +{ + "version": "kc-mission-v1", + "name": "kubernetes-97743-support-m1-macbooks-darwin-arm64-on-the-client-side", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "kubernetes: Support M1 MacBooks darwin/arm64 on the client-side", + "description": "Please test using:\n```\nbuild/run.sh make generated_files && make quick-release-images\n```\n\n**What type of PR is this?**\n\n**What this PR does / why we need it**:\n\n**Which issue(s) this PR fixes**:\n\n**Special notes for your reviewer**:\n\n**Does this PR introduce a user-facing change?**:\n\n**Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.**:\n\n```docs\n\n```", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Please test using:\n```\nbuild/run.sh make generated_files && make quick-release-images\n```\n\n**What type of PR is this?**\n\n**What this PR does / why we need it**:\n\n**Which issue(s) this PR fixes**:\n\n**S" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nbuild/run.sh make generated_files && make quick-release-images\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kubernetes/kubernetes/pull/97551. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "- Code change: this pr or as [inductor comments](https://gist.github.com/inductor/d944f90cb277d077fb8b737ff74b9cf2)\n- [GO 1.16beta +](https://github.com/golang/go/compare/go1.16beta1...master) build go locally\n- etcd doesn't support arm ? https://github.com/etcd-io/etcd/pull/12557/files\n\n**What this PR does / why we need it**:\n\n**Which issue(s) this PR fixes**:\n\n**Special notes for your reviewer**:\n\n**Does this PR introduce a user-facing change?**:\n\n**Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.**:\n```docs\n\n```", + "codeSnippets": [ + "build/run.sh make generated_files && make quick-release-images", + "build/run.sh make generated_files && make quick-release-images", + "build/run.sh make generated_files && make quick-release-images", + "**Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.**:\r\n\r\n", + "- Code change: this pr or as [inductor comments](https://gist.github.com/inductor/d944f90cb277d077fb8b737ff74b9cf2)\r\n- [GO 1.16beta +](https://github.com/golang/go/compare/go1.16beta1...master) build go locally\r\n- etcd doesn't support arm ? 
https://github.com/etcd-io/etcd/pull/12557/files\r\n\r\n\r\n/kind feature\r\n\r\n**What this PR does / why we need it**:\r\n\r\n**Which issue(s) this PR fixes**:\r\n\r\nFixes #97550\r\n\r\n**Special notes for your reviewer**:\r\n\r\n**Does this PR introduce a user-facing change?**:" + ] + } + }, + "metadata": { + "tags": [ + "kubernetes", + "graduated", + "orchestration", + "troubleshoot", + "area-test", + "priority-important-soon", + "lgtm", + "release-note", + "size-m", + "kind-feature", + "area-release-eng", + "approved", + "cncf-cla--yes", + "sig-testing" + ], + "cncfProjects": [ + "kubernetes" + ], + "targetResourceKinds": [], + "difficulty": "advanced", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/kubernetes/kubernetes/pull/97743", + "repo": "https://github.com/kubernetes/kubernetes", + "pr": "https://github.com/kubernetes/kubernetes/pull/97551" + }, + "reactions": 49, + "comments": 57, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with kubernetes installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:34:51.275Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubescape/kubescape-1332-feat-add-kubescape-patch-command.json b/solutions/cncf-generated/kubescape/kubescape-1332-feat-add-kubescape-patch-command.json new file mode 100644 index 00000000..978dbd2d --- /dev/null +++ b/solutions/cncf-generated/kubescape/kubescape-1332-feat-add-kubescape-patch-command.json @@ -0,0 +1,92 @@ +{ + "version": "kc-mission-v1", + "name": "kubescape-1332-feat-add-kubescape-patch-command", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "kubescape: feat: add kubescape patch command", + "description": "## Overview\nAdd `kubescape patch` command\n\n## Additional Information\n1. The `kubescape patch` command can be used to patch container images with vulnerabilities.\n2. It uses [copa](https://github.com/project-copacetic/copacetic) and [buildkit](https://github.com/moby/buildkit) under the hood for patching the images, and [grype](https://github.com/anchore/grype) as its engine for scanning the images (at the moment)\n3. The detailed documentation for this command can be found [here](https://github.c", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "## Overview\nAdd `kubescape patch` command\n\n## Additional Information\n1. The `kubescape patch` command can be used to patch container images with vulnerabilities.\n2. It uses [copa](https://github.com/p" + }, + { + "title": "We should add the image name to the `kubescape patch` follow-up step (which i...", + "description": "We should add the image name to the `kubescape patch` follow-up step (which is printed at the end)" + }, + { + "title": "Maybe we should add to the output some comparison between the old image and t...", + "description": "Maybe we should add to the output some comparison between the old image and the new one, so it will become clear to the user the value he is getting from the `patch` command" + }, + { + "title": "Do we need to print the entire copa output? I think is best to have it in deb...", + "description": "Do we need to print the entire copa output? 
I think is best to have it in debug mode only" + }, + { + "title": "We shouldn't use the `-r` flag. We have the `-f` flag, where we can specify J...", + "description": "We shouldn't use the `-r` flag. We have the `-f` flag, where we can specify JSON output" + }, + { + "title": "Maybe the commands should be `kubescape image scan` and `kubescape image patch`", + "description": "Maybe the commands should be `kubescape image scan` and `kubescape image patch`" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nsudo buildkitd & \r\nsudo kubescape patch --image \n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "@anubhav06 This feature looks really cool and works smoothly! I'm very excited for this!\n\nI have some general comments about the feature, which are mostly food for thought:\n\n1. We should add the image name to the `kubescape patch` follow-up step (which is printed at the end)\n2. Maybe we should add to the output some comparison between the old image and the new one, so it will become clear to the user the value he is getting from the `patch` command\n3. Do we need to print the entire copa output? I think is best to have it in debug mode only\n4. We shouldn't use the `-r` flag. We have the `-f` flag, where we can specify JSON output\n5. Maybe the commands should be `kubescape image scan` and `kubescape image patch`\n\ncc: @craigbox @dwertent", + "codeSnippets": [ + "sudo buildkitd & \r\nsudo kubescape patch --image ", + "sudo buildkitd & \r\nsudo kubescape patch --image " + ] + } + }, + "metadata": { + "tags": [ + "kubescape", + "incubating", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "kubescape" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/kubescape/kubescape/pull/1332", + "repo": "https://github.com/kubescape/kubescape" + }, + "reactions": 2, + "comments": 10, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with kubescape installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:37:32.537Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubevirt/kubevirt-10015-usb-host-pass-through.json b/solutions/cncf-generated/kubevirt/kubevirt-10015-usb-host-pass-through.json new file mode 100644 index 00000000..57a273d9 --- /dev/null +++ b/solutions/cncf-generated/kubevirt/kubevirt-10015-usb-host-pass-through.json @@ -0,0 +1,88 @@ +{ + "version": "kc-mission-v1", + "name": "kubevirt-10015-usb-host-pass-through", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "kubevirt: USB host pass through", + "description": "**What this PR does / why we need it**:\nThis PR allows USB devices that are plugged to the cluster's Nodes to be allocated to VMs running in said Nodes.\n\n### Staring with main components and a simple use case\n\nA `cluster admin` can define USB devices he wants to expose to VM using KubeVirt CRD, under permitted host devices\nA new CRD called NodeConfig is introduce to allow requiring USB devices. 
Note that [enabling](https://kubevirt.io/user-guide/operations/activating_feature_gates/) `HostDevice", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "**What this PR does / why we need it**:\nThis PR allows USB devices that are plugged to the cluster's Nodes to be allocated to VMs running in said Nodes.\n\n### Staring with main components and a simple " + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\napiVersion: kubevirt.io/v1\r\nkind: KubeVirt\r\nmetadata:\r\n name: kubevirt\r\n namespace: kubevirt\r\nspec:\r\n configuration:\r\n permittedHostDevices:\r\n usb:\r\n - resourceName: kubevirt.io/storage\r\n selectors:\r\n - vendor: \"46f4\"\r\n product: \"0001\"\r\n developerConfiguration: \r\n featureGates:\r\n - HostDevices\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kubevirt/project-infra/pull/2872. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "This is possible since:\n https://github.com/kubevirt/kubevirtci/pull/996\n\nThe follow CI would benefit from it:\n https://github.com/kubevirt/kubevirt/pull/10015", + "codeSnippets": [ + "apiVersion: kubevirt.io/v1\r\nkind: KubeVirt\r\nmetadata:\r\n name: kubevirt\r\n namespace: kubevirt\r\nspec:\r\n configuration:\r\n permittedHostDevices:\r\n usb:\r\n - resourceName: kubevirt.io/storage\r\n selectors:\r\n - vendor: \"46f4\"\r\n product: \"0001\"\r\n developerConfiguration: \r\n featureGates:\r\n - HostDevices", + "apiVersion: kubevirt.io/v1\r\nkind: KubeVirt\r\nmetadata:\r\n name: kubevirt\r\n namespace: kubevirt\r\nspec:\r\n configuration:\r\n permittedHostDevices:\r\n usb:\r\n - resourceName: kubevirt.io/storage\r\n selectors:\r\n - vendor: \"46f4\"\r\n product: \"0001\"\r\n developerConfiguration: \r\n featureGates:\r\n - HostDevices", + "spec:\r\n domain:\r\n devices:\r\n hostDevices:\r\n - deviceName: kubevirt.io/storage\r\n name: my-storage-usb-devices", + "(toso) $ export KUBEVIRT_PROVIDER=k8s-1.26-centos9\r\n(toso) $ export KUBEVIRT_TAG=latest\r\n(toso) $ export KUBEVIRT_PROVIDER_EXTRA_ARGS=\"--usb 20M --usb 30M --usb 40M\"\r\n(toso) $ make cluster-up\r\n...\r\n(toso) $ ./cluster-up/ssh.sh node01\r\n(node01) $ dmesg | grep -i idVendor=46f4\r\n[ 1.204437] usb 4-1: New USB device found, idVendor=46f4, idProduct=0001, bcdDevice= 0.00\r\n[ 1.204501] usb 3-1: New USB device found, idVendor=46f4, idProduct=0001, bcdDevice= 0.00\r\n[ 1.330900] usb 4-2: New USB device found, idVendor=46f4, idProduct=0001, bcdDevice= 0.00" + ] + } + }, + "metadata": { + "tags": [ + "kubevirt", + "incubating", + "app-definition", + "troubleshoot", + "release-note", + "size-xxl", + "kind-api-change", + "lgtm", + "approved", + "dco-signoff--yes" + ], + "cncfProjects": [ + "kubevirt" + ], + "targetResourceKinds": [ + "Namespace", + "Node" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/kubevirt/kubevirt/pull/10015", + "repo": "https://github.com/kubevirt/kubevirt", + "pr": "https://github.com/kubevirt/project-infra/pull/2872" + }, + "reactions": 5, + "comments": 40, + "synthesizedBy": "regex", + "qualityScore": 64 + }, + "prerequisites": { + "kubernetes": 
">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with kubevirt installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:37:49.764Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kubevirt/kubevirt-11256-rpm-update-virtualization-packages.json b/solutions/cncf-generated/kubevirt/kubevirt-11256-rpm-update-virtualization-packages.json new file mode 100644 index 00000000..b3023a73 --- /dev/null +++ b/solutions/cncf-generated/kubevirt/kubevirt-11256-rpm-update-virtualization-packages.json @@ -0,0 +1,87 @@ +{ + "version": "kc-mission-v1", + "name": "kubevirt-11256-rpm-update-virtualization-packages", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "kubevirt: rpm: Update virtualization packages", + "description": "zation packages. Specifically:\n\n* QEMU (8.0.0 → 8.2.0)\n* libvirt (9.5.0 → 10.0.0)\n* SeaBIOS (1.16.1 → 1.16.3)\n* EDKII (20230524 → 20231122)\n* passt (20230818 → 20231204)\n* virtiofsd (1.7.2 → 1.1.10.1)\n* guestfs-tools (1.50.1 → 1.51.6)\n\nzation technology based on libvirt 10.0.0 and QEMU 8.2.0.\nEach new release of libvirt and QEMU contains numerous improvements and bug fixes.\n```", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "zation packages. Specifically:\n\n* QEMU (8.0.0 → 8.2.0)\n* libvirt (9.5.0 → 10.0.0)\n* SeaBIOS (1.16.1 → 1.16.3)\n* EDKII (20230524 → 20231122)\n* passt (20230818 → 20231204)\n* virtiofsd (1.7.2 → 1.1.10.1)" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n\r\n\r\n**What this PR does / why we need it**:\r\nEnable building and running kubevirt on IBM Z Platform.\r\n\r\nThe Purpose of this Draft PR is not to merge \n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kubevirt/kubevirt/pull/10490. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "**What this PR does / why we need it**:\nEnable building and running kubevirt on IBM Z Platform.\n\nThe Purpose of this Draft PR is not to merge it as is, but rather have a discussion about the Changes and start gathering Feedback from the community.\n\n**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\nFixes #\n\n**Special notes for your reviewer**:\nSince this is a WIP PR, here are my open TODOs for the PR:\n\nTODOs:\n\n**Checklist**\n\nThis checklist is not enforcing, but it's a reminder of items that could be relevant to every PR.\nApprovers are expected to review this list.", + "codeSnippets": [ + "\r\n\r\n**What this PR does / why we need it**:\r\nEnable building and running kubevirt on IBM Z Platform.\r\n\r\nThe Purpose of this Draft PR is not to merge", + "\r\n\r\n**What this PR does / why we need it**:\r\nEnable building and running kubevirt on IBM Z Platform.\r\n\r\nThe Purpose of this Draft PR is not to merge it as is, but rather have a discussion about the Changes and start gathering Feedback from the community.\r\n\r\n**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:\r\nFixes #\r\n\r\n**Special notes for your reviewer**:\r\nSince this is a WIP PR, here are my open TODOs for the PR:\r\n\r\nTODOs:\r\n- [x] Test all changed build scripts on s390x\r\n- [x] Rework Graphic device\r\n- [x] Disable ACPI by default on s390x\r\n- [x] Add s390x to tests\r\n- [x] Enable cross-compiling\r\n- [x] Do not build every container image on s390x, as some base images are not available\r\n- [x] Cleanup commits (Squash, wrong author etc.)\r\n- [x] Rebase PR on main\r\n- [ ] (Optional) Split PR into multiple smaller PRs\r\n- [x] Write Release Note\r\n- [x] Checklist below\r\n\r\n**Checklist**\r\n\r\nThis checklist is not enforcing, but it's a reminder of items that could be relevant to every PR.\r\nApprovers are expected to review this list.\r\n\r\n- [x] Design: A [design document](https://github.com/kubevirt/community/tree/main/design-proposals) was considered and is present (link) or not required\r\n- [x] PR: The PR description is expressive enough and will help future contributors\r\n- [x] Code: [Write code that humans can understand](https://en.wikiquote.org/wiki/Martin_Fowler#code-for-humans) and [Keep it simple](https://en.wikipedia.org/wiki/KISS_principle)\r\n- [x] Refactor: You have [left the code cleaner than you found it (Boy Scout Rule)](https://learning.oreilly.com/library/view/97-things-every/9780596809515/ch08.html)\r\n- [x] Upgrade: Impact of this change on upgrade flows was considered and addressed if required\r\n- [x] Testing: New code requires [new unit tests](https://github.com/kubevirt/kubevirt/blob/main/docs/reviewer-guide.md#when-is-a-pr-good-enough). New features and bug fixes require at least on e2e test\r\n- [x] Documentation: A [user-guide update](https://github.com/kubevirt/user-guide/) was considered and is present (link) or not required. 
You want a user-guide update if it's a user facing feature / API change.\r\n- [x] Community: Announcement to [kubevirt-dev](https://groups.google.com/g/kubevirt-dev/) was considered\r\n\r\n**Release note**:\r\n", + "It seems however that although the migration is in a Running phase, the new virt-launcher instance is already ready and running:", + "However, when I glanced over virt-handler's logs I saw many of these:", + "So I'm still not sure exactly what's causing the problem or what it has to do with your version bumps, but I hope it's a good starting point for further debug.\r\n\r\nLet me know if I can further help you!\r\n\r\nEDIT: these error logs are all over the place, so I tend to think that the same problem fails all 3 tests. However I'm not sure yet why only hotplug tests fail. cc @acardace\nThe serial console bit appears to be a red herring as it shows up in the virt-handler logs quite late, just a little bit before the test is considered failed due to the timeout.\r\n\r\nThis is the actual error:" + ] + } + }, + "metadata": { + "tags": [ + "kubevirt", + "incubating", + "app-definition", + "troubleshoot", + "sig-network", + "release-note", + "size-xxl", + "lgtm", + "approved", + "dco-signoff--yes", + "sig-buildsystem" + ], + "cncfProjects": [ + "kubevirt" + ], + "targetResourceKinds": [], + "difficulty": "expert", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/kubevirt/kubevirt/pull/11256", + "repo": "https://github.com/kubevirt/kubevirt", + "pr": "https://github.com/kubevirt/kubevirt/pull/10490" + }, + "reactions": 5, + "comments": 72, + "synthesizedBy": "regex", + "qualityScore": 62 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with kubevirt installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:37:51.999Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kured/kured-660-add-multiple-concurrent-node-reboot-feature.json b/solutions/cncf-generated/kured/kured-660-add-multiple-concurrent-node-reboot-feature.json new file mode 100644 index 00000000..3b236156 --- /dev/null +++ b/solutions/cncf-generated/kured/kured-660-add-multiple-concurrent-node-reboot-feature.json @@ -0,0 +1,79 @@ +{ + "version": "kc-mission-v1", + "name": "kured-660-add-multiple-concurrent-node-reboot-feature", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "kured: Add multiple concurrent node reboot feature", + "description": "Currently in kured a single node can get a lock with Acquire. There could be situations where multiple nodes might want a lock in the event that a cluster can handle multiple nodes being rebooted. This adds the side-by-side implementation for a multiple node lock situation.\n\n### Testing done\n\nAdded unit tests. Also ran a manual test with `--concurrency=2`. I observed that two nodes rebooted at the same time:\n\n```\n$ kubectl get no\nNAME STATUS ", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Currently in kured a single node can get a lock with Acquire. There could be situations where multiple nodes might want a lock in the event that a cluster can handle multiple nodes being rebooted. 
Thi" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n$ kubectl get no\r\nNAME STATUS ROLES AGE VERSION\r\naks-nodepool1-14327021-vmss000000 Ready agent 24m v1.23.8\r\naks-nodepool1-14327021-vmss000001 Ready agent 24m v1.23.8\r\naks-nodepool1-14327021-vmss000002 Ready agent 24m v1.23.8\r\naks-nodepool1-14327021-vmss000003 Ready agent 24m v1.23.8\r\naks-nodepool1-14327021-vmss000004 N\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "It seems that all nodes are rebooting at once: @trstringer \n```\nThere are 5 nodes in the cluster\n0 nodes were removed from pool once:\n0 nodes removed from the pool are now back:\nResult of command kubectl get nodes ... showing unschedulable nodes:\nchart-testing-control-plane \nchart-testing-control-plane2 \nchart-testing-control-plane3 \nchart-testing-worker \nchart-testing-worker2 \nAttempt 1 failed! Trying again in 60 seconds...\n0 nodes were removed from pool once:\n0 nodes removed from the pool are now back:\nResult of command kubectl get nodes ... showing unschedulable nodes:\nchart-testing-control-plane true\nchart-testing-control-plane2 true\nchart-testing-control-plane3 true\nchart-testing-worker true\nchart-testing-worker2 true\nchart-testing-control-plane is now unschedulable!\nchart-testing-control-plane2 is now unschedulable!\nchart-testing-control-plane3 is now unschedulable!\nchart-testing-worker is now unschedulable!\nchart-testing-worker2 is now unschedulable!\nAttempt 2 failed! Trying again in 60 seconds...\n```", + "codeSnippets": [ + "$ kubectl get no\r\nNAME STATUS ROLES AGE VERSION\r\naks-nodepool1-14327021-vmss000000 Ready agent 24m v1.23.8\r\naks-nodepool1-14327021-vmss000001 Ready agent 24m v1.23.8\r\naks-nodepool1-14327021-vmss000002 Ready agent 24m v1.23.8\r\naks-nodepool1-14327021-vmss000003 Ready agent 24m v1.23.8\r\naks-nodepool1-14327021-vmss000004 N", + "$ kubectl get no\r\nNAME STATUS ROLES AGE VERSION\r\naks-nodepool1-14327021-vmss000000 Ready agent 24m v1.23.8\r\naks-nodepool1-14327021-vmss000001 Ready agent 24m v1.23.8\r\naks-nodepool1-14327021-vmss000002 Ready agent 24m v1.23.8\r\naks-nodepool1-14327021-vmss000003 Ready agent 24m v1.23.8\r\naks-nodepool1-14327021-vmss000004 NotReady,SchedulingDisabled agent 24m v1.23.8\r\naks-nodepool1-14327021-vmss000005 Ready agent 24m v1.23.8\r\naks-nodepool1-14327021-vmss000006 Ready agent 24m v1.23.8\r\naks-nodepool1-14327021-vmss000007 Ready,SchedulingDisabled agent 24m v1.23.8", + "\"weave.works/kured-node-lock\": \"{\\\"maxOwners\\\":2,\\\"locks\\\":[{\\\"nodeID\\\":\\\"aks-nodepool1-14327021-vmss000007\\\",\\\"metadata\\\":{\\\"unschedulable\\\":false},\\\"created\\\":\\\"2022-09-23T21:06:30.814409507Z\\\",\\\"TTL\\\":0},{\\\"nodeID\\\":\\\"aks-nodepool1-14327021-vmss000004\\\",\\\"metadata\\\":{\\\"unschedulable\\\":false},\\\"created\\\":\\\"2022-09-23T21:06:57.626718467Z\\\",\\\"TTL\\\":0}]}\"", + "There are 5 nodes in the cluster\r\n0 nodes were removed from pool once:\r\n0 nodes removed from the pool are now back:\r\nResult of command kubectl get nodes ... showing unschedulable nodes:\r\nchart-testing-control-plane \r\nchart-testing-control-plane2 \r\nchart-testing-control-plane3 \r\nchart-testing-worker \r\nchart-testing-worker2 \r\nAttempt 1 failed! 
Trying again in 60 seconds...\r\n0 nodes were removed from pool once:\r\n0 nodes removed from the pool are now back:\r\nResult of command kubectl get nodes ... showing unschedulable nodes:\r\nchart-testing-control-plane true\r\nchart-testing-control-plane2 true\r\nchart-testing-control-plane3 true\r\nchart-testing-worker true\r\nchart-testing-worker2 true\r\nchart-testing-control-plane is now unschedulable!\r\nchart-testing-control-plane2 is now unschedulable!\r\nchart-testing-control-plane3 is now unschedulable!\r\nchart-testing-worker is now unschedulable!\r\nchart-testing-worker2 is now unschedulable!\r\nAttempt 2 failed! Trying again in 60 seconds..." + ] + } + }, + "metadata": { + "tags": [ + "kured", + "sandbox", + "app-definition", + "troubleshoot", + "enhancement", + "keep" + ], + "cncfProjects": [ + "kured" + ], + "targetResourceKinds": [ + "Role", + "Node" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/kubereboot/kured/pull/660", + "repo": "https://github.com/kubereboot/kured" + }, + "reactions": 2, + "comments": 12, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with kured installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:41:42.219Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kured/kured-814-add-signal-reboot.json b/solutions/cncf-generated/kured/kured-814-add-signal-reboot.json new file mode 100644 index 00000000..aa5ff813 --- /dev/null +++ b/solutions/cncf-generated/kured/kured-814-add-signal-reboot.json @@ -0,0 +1,86 @@ +{ + "version": "kc-mission-v1", + "name": "kured-814-add-signal-reboot", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "kured: Add signal-reboot", + "description": "Based on #813\n\nThis PR adds a `--reboot-method` flag with \"command\" (default) and \"signal\" option. The \"command\" option uses the `--reboot-command` on the host with `nsenter` as before. The new \"signal\" mode uses a `SIGRTMIN+5` signal by default against PID 1 (systemd) to reboot the node. The signal can be changed via `--reboot-signal` flag.\n\nWith this, the kured pod runs without privileged mode.\n\nThis PR is published as docker-image (amd64 and arm64): `ghcr.io/ckotzbauer/kured:1.14.0-alpha.2`\nU", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Based on #813\n\nThis PR adds a `--reboot-method` flag with \"command\" (default) and \"signal\" option. The \"command\" option uses the `--reboot-command` on the host with `nsenter` as before. 
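For the kured concurrent-reboot feature above, the flag added by the PR is `--concurrency`. A hedged sketch of passing it through the Helm chart, mirroring the `extraArgs` convention used in the kured values snippet later in this section (the exact values key may differ between chart versions and is an assumption here):

```yaml
# Illustrative values.yaml fragment; assumes a chart version that forwards
# extraArgs entries to the kured daemon as --key=value flags.
extraArgs:
  concurrency: 2   # let two nodes acquire the reboot lock at the same time
```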
The new \"signa" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nimage:\r\n repository: ghcr.io/ckotzbauer/kured\r\n tag: 1.14.0-alpha.2\r\nupdateStrategy: RollingUpdate\r\nconfiguration:\r\n period: \"0h0m30s\"\r\n rebootDelay: 0h1m0s\r\n rebootSentinel: /sentinel/reboot-required\r\nextraArgs:\r\n reboot-method: signal\r\ncontainerSecurityContext:\r\n readOnlyRootFilesystem: true\r\n privileged: false\r\n allowPrivilegeEscalation: false\r\n capabilities:\r\n drop: [\"*\"]\r\n add: [\"CAP_KILL\"]\r\nvolumes:\r\n - name: sentinel\r\n hostPath:\r\n path: /var/run\r\n type: Direc\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kubereboot/charts/pull/51. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Based on #49\n\nDo not merge until https://github.com/kubereboot/kured/pull/814 is released.", + "codeSnippets": [ + "image:\r\n repository: ghcr.io/ckotzbauer/kured\r\n tag: 1.14.0-alpha.2\r\nupdateStrategy: RollingUpdate\r\nconfiguration:\r\n period: \"0h0m30s\"\r\n rebootDelay: 0h1m0s\r\n rebootSentinel: /sentinel/reboot-required\r\nextraArgs:\r\n reboot-method: signal\r\ncontainerSecurityContext:\r\n readOnlyRootFilesystem: true\r\n privileged: false\r\n allowPrivilegeEscalation: false\r\n capabilities:\r\n drop: [\"*\"]\r\n add: [\"CAP_KILL\"]\r\nvolumes:\r\n - name: sentinel\r\n hostPath:\r\n path: /var/run\r\n type: Direc", + "image:\r\n repository: ghcr.io/ckotzbauer/kured\r\n tag: 1.14.0-alpha.2\r\nupdateStrategy: RollingUpdate\r\nconfiguration:\r\n period: \"0h0m30s\"\r\n rebootDelay: 0h1m0s\r\n rebootSentinel: /sentinel/reboot-required\r\nextraArgs:\r\n reboot-method: signal\r\ncontainerSecurityContext:\r\n readOnlyRootFilesystem: true\r\n privileged: false\r\n allowPrivilegeEscalation: false\r\n capabilities:\r\n drop: [\"*\"]\r\n add: [\"CAP_KILL\"]\r\nvolumes:\r\n - name: sentinel\r\n hostPath:\r\n path: /var/run\r\n type: Directory\r\nvolumeMounts:\r\n - name: sentinel\r\n mountPath: /sentinel\r\n readOnly: true", + "$ helm -n kured install -f ~/kured/values.yml kured kubereboot/kured\r\n$ helm -n kured list\r\nNAME \tNAMESPACE\tREVISION\tUPDATED \tSTATUS \tCHART \tAPP VERSION\r\nkured\tkured \t1 \t2023-08-08 21:50:33.438477295 +0000 UTC\tdeployed\tkured-5.1.0\t1.13.2 \r\n$ kubectl -n kured exec -it kured-vs4xb -- /bin/sh\r\n/ # kill -s SIGRTMIN+5 1", + "Aug 08 14:52:08 kermes-dev-k8s-node-a03 systemd[1]: Received SIGRTMIN+6 from PID 22396 (sh).", + "/ # ps -ef | grep kured | grep -v grep\r\n 6107 root 0:00 /usr/bin/kured --ds-name=kured --ds-namespace=kured --metrics-port=8080 --period=0h0m30s --reboot-sentinel=/sentinel/reboot-required --reboot-command=/bin/systemctl reboot --reboot-delay=0h1m0s --log-format=text --reboot-method=signal\r\n/ # grep ^Cap /proc/6107/status\r\nCapInh:\t0000000000000000\r\nCapPrm:\t00000000a80425fb\r\nCapEff:\t00000000a80425fb\r\nCapBnd:\t00000000a80425fb\r\nCapAmb:\t0000000000000000" + ] + } + }, + "metadata": { + "tags": [ + "kured", + "sandbox", + "app-definition", + "troubleshoot", + "enhancement", + "keep", + "security" + ], + "cncfProjects": [ + "kured" + ], + "targetResourceKinds": [ + "Pod", + "Node" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + 
"issue": "https://github.com/kubereboot/kured/pull/814", + "repo": "https://github.com/kubereboot/kured", + "pr": "https://github.com/kubereboot/charts/pull/51" + }, + "reactions": 4, + "comments": 14, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with kured installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:41:40.818Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kyverno/kyverno-13350-make-sure-that-imagepullsecrets-are-properly-formatted.json b/solutions/cncf-generated/kyverno/kyverno-13350-make-sure-that-imagepullsecrets-are-properly-formatted.json new file mode 100644 index 00000000..2776c06e --- /dev/null +++ b/solutions/cncf-generated/kyverno/kyverno-13350-make-sure-that-imagepullsecrets-are-properly-formatted.json @@ -0,0 +1,82 @@ +{ + "version": "kc-mission-v1", + "name": "kyverno-13350-make-sure-that-imagepullsecrets-are-properly-formatted", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "kyverno: Make sure that imagePullSecrets are properly formatted", + "description": "## Explanation\n\nIn Kyverno chart version 3.4.2 there is a formatting issue in Kyverno's Helm Chart, introduced by https://github.com/kyverno/kyverno/commit/385eef980e59a2965570a2ef91350e7440d08e84 which causes `imagePullSecrets` to be broken. This is a major issue if you are using a pull-through cache to fetch your Kyverno container image which is common in corporate environments.\n\n[Here is a playground example demonstrating the issue](https://helm-playground.com/#t=N7C0AIHoCpwCQKYBsAOCBO4BmBXAd", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "## Explanation\n\nIn Kyverno chart version 3.4.2 there is a formatting issue in Kyverno's Helm Chart, introduced by https://github.com/kyverno/kyverno/commit/385eef980e59a2965570a2ef91350e7440d08e84 whi" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nimagePullSecrets:\r\n - map[name:foo]\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "@lukaspj i'm afraid it's not that simple 🤦 \n\nSometimes we use this:\n```yaml\n # -- Image pull secrets\n imagePullSecrets: []\n # - name: secretName\n```\n\nAnd sometimes:\n```yaml\n # -- Image pull secrets\n imagePullSecrets: []\n # - secretName\n```", + "codeSnippets": [ + "imagePullSecrets:\r\n - map[name:foo]", + "imagePullSecrets:\r\n - map[name:foo]", + "imagePullSecrets:\r\n - name: foo", + "apiVersion: v1\r\nkind: ConfigMap\r\nmetadata:\r\n name: roles-dictionary\r\n namespace: default\r\ndata:\r\n allowed-roles: \"[\\\"cluster-admin\\\", \\\"cluster-operator\\\", \\\"tenant-admin\\\"]\"", + "name: prepend-image-registry\r\npolicies:\r\n - prepend_image_registry.yaml\r\nresources:\r\n - resource.yaml\r\nvariables: values.yaml\r\nresults:\r\n - policy: prepend-registry\r\n rule: prepend-registry-containers\r\n resource: mypod\r\n # if mutate rule\r\n patchedResource: patchedResource01.yaml\r\n kind: Pod\r\n result: pass" + ] + } + }, + "metadata": { + "tags": [ + "kyverno", + "incubating", + "security", + "troubleshoot", + "size-s" + ], + "cncfProjects": [ + "kyverno" + ], + "targetResourceKinds": [ + "Pod", + "Configmap", + "Secret", + "Namespace", + "Role" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/kyverno/kyverno/pull/13350", + "repo": "https://github.com/kyverno/kyverno" + }, + "reactions": 6, + "comments": 5, + "synthesizedBy": "regex", + "qualityScore": 64 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with kyverno installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:38:07.929Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kyverno/kyverno-13635-feat-engine-support-platform-selection-in-imageregistry-context.json b/solutions/cncf-generated/kyverno/kyverno-13635-feat-engine-support-platform-selection-in-imageregistry-context.json new file mode 100644 index 00000000..7b9a031e --- /dev/null +++ b/solutions/cncf-generated/kyverno/kyverno-13635-feat-engine-support-platform-selection-in-imageregistry-context.json @@ -0,0 +1,76 @@ +{ + "version": "kc-mission-v1", + "name": "kyverno-13635-feat-engine-support-platform-selection-in-imageregistry-context", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "kyverno: feat(engine): support platform selection in imageRegistry context", + "description": "## Explanation\n\nThis pull request fixes a bug where policies using the `imageRegistry` context would fail when processing container images built exclusively for non-`amd64` architectures, such as `linux/arm64`. The failure occurred because Kyverno would default to requesting the `linux/amd64` platform from the image index, causing an error if that platform was not present.\n\nThis PR resolves the issue by introducing a new feature: an optional `platform` field in the `imageRegistry` context. 
This ", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "## Explanation\n\nThis pull request fixes a bug where policies using the `imageRegistry` context would fail when processing container images built exclusively for non-`amd64` architectures, such as `lin" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\napiVersion: kyverno.io/v1\r\nkind: ClusterPolicy\r\nmetadata:\r\n name: replace-image-registry-with-harbor\r\nspec:\r\n rules:\r\n - name: redirect-docker\r\n match:\r\n any:\r\n - resources:\r\n kinds:\r\n - Pod\r\n mutate:\r\n foreach:\r\n - list: request.object.spec.containers\r\n context:\r\n - name: imageData\r\n imageRegistry:\r\n reference: \"{{ element.image }}\"\r\n # The new platform field is used here to\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "## [Codecov](https://app.codecov.io/gh/kyverno/kyverno/pull/13635?dropdown=coverage&src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=kyverno) Report\n:x: Patch coverage is `60.00000%` with `6 lines` in your changes missing coverage. Please review.\n:white_check_mark: Project coverage is 16.14%. Comparing base ([`f32c357`](https://app.codecov.io/gh/kyverno/kyverno/commit/f32c357fa6db943f5f41620faf6bc7505b7289c4?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=kyverno)) to head ([`1fc43b9`](https://app.codecov.io/gh/kyverno/kyverno/commit/1fc43b9ac9a47cc0f58365ffba6ac30785c8e2e0?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=kyverno)).\n:warning: Report is 49 commits behind head on main.\n\n| [Files with missing lines](https://app.codecov.io/gh/kyverno/kyverno/pull/13635?dropdown=coverage&src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=kyverno) | Patch % | Lines |\n|---|---|---|\n| [pkg/engine/context/loaders/imagedata.go](https://app.codecov.io/gh/kyverno/kyverno/pull/13635?src=pr&el=tree&filepath=pkg%2Fengine%2Fcontext%2Floaders%2Fimagedata.go&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=kyverno#diff-cGtnL2VuZ2luZS9jb250ZXh0L2xvYWRlcnMvaW1hZ2VkYXRhLmdv) | 0.00% | [3 Missing :warning: ](", + "codeSnippets": [ + "apiVersion: kyverno.io/v1\r\nkind: ClusterPolicy\r\nmetadata:\r\n name: replace-image-registry-with-harbor\r\nspec:\r\n rules:\r\n - name: redirect-docker\r\n match:\r\n any:\r\n - resources:\r\n kinds:\r\n - Pod\r\n mutate:\r\n foreach:\r\n - list: request.object.spec.containers\r\n context:\r\n - name: imageData\r\n imageRegistry:\r\n reference: \"{{ element.image }}\"\r\n # The new platform field is used here to", + "apiVersion: kyverno.io/v1\r\nkind: ClusterPolicy\r\nmetadata:\r\n name: replace-image-registry-with-harbor\r\nspec:\r\n rules:\r\n - name: redirect-docker\r\n match:\r\n any:\r\n - resources:\r\n kinds:\r\n - Pod\r\n mutate:\r\n foreach:\r\n - list: request.object.spec.containers\r\n context:\r\n - name: imageData\r\n imageRegistry:\r\n reference: \"{{ element.image }}\"\r\n # The new platform field is used here to target the ARM64 image\r\n platform: \"linux/arm64\"\r\n patchStrategicMerge:\r\n spec:\r\n 
containers:\r\n - name: \"{{ element.name }}\"\r\n image: harbor.example.com/k8s/{{imageData.repository}}:{{imageData.identifier}}", + "apiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n name: arm\r\nspec:\r\n containers:\r\n - image: registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:arm64-v18.1.0\r\n name: helper-arm" + ] + } + }, + "metadata": { + "tags": [ + "kyverno", + "incubating", + "security", + "troubleshoot", + "size-xl" + ], + "cncfProjects": [ + "kyverno" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/kyverno/kyverno/pull/13635", + "repo": "https://github.com/kyverno/kyverno" + }, + "reactions": 2, + "comments": 11, + "synthesizedBy": "regex", + "qualityScore": 66 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with kyverno installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:38:17.299Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kyverno/kyverno-1930-add-special-variable-substitution-logic-for-preconditions.json b/solutions/cncf-generated/kyverno/kyverno-1930-add-special-variable-substitution-logic-for-preconditions.json new file mode 100644 index 00000000..cf8c32a3 --- /dev/null +++ b/solutions/cncf-generated/kyverno/kyverno-1930-add-special-variable-substitution-logic-for-preconditions.json @@ -0,0 +1,82 @@ +{ + "version": "kc-mission-v1", + "name": "kyverno-1930-add-special-variable-substitution-logic-for-preconditions", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "kyverno: add special variable substitution logic for preconditions", + "description": "## Related issue\n\n## Proposed Changes\n- Remove variable substitution logic from preconditions evaluation\n- Put special variable substitution logic before preconditions evaluation\n- Special means \"do not fail on error\" as described in expected result of the issue\n\n### Proof Manifests\n```yaml\napiVersion: kyverno.io/v1\nkind: ClusterPolicy\nmetadata:\n name: set-service-labels\n annotations:\n pod-policies.kyverno.io/autogen-controllers: none\nspec:\n background: false\n rules:\n - name: set-servi", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "## Related issue\n\n## Proposed Changes\n- Remove variable substitution logic from preconditions evaluation\n- Put special variable substitution logic before preconditions evaluation\n- Special means \"do n" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\napiVersion: kyverno.io/v1\r\nkind: ClusterPolicy\r\nmetadata:\r\n name: set-service-labels\r\n annotations:\r\n pod-policies.kyverno.io/autogen-controllers: none\r\nspec:\r\n background: false\r\n rules:\r\n - name: set-service-labels-pods\r\n match:\r\n resources:\r\n kinds:\r\n - Pod\r\n exclude:\r\n resources:\r\n namespaces:\r\n - \"kube*\"\r\n - \"openshift*\"\r\n - \"kube-*\"\r\n - \"openshift-*\"\r\n preconditions:\r\n any:\r\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
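To make the mutate pattern above easier to follow, here is a hedged sketch of roughly what the `imageData` context variable resolves to for the ARM-only test image in the snippets. The field names follow Kyverno's documented `imageRegistry` context and should be treated as an assumption; the values are derived from the test Pod's image reference:

```yaml
# Inferred example, not output captured from the source.
imageData:
  registry: registry.gitlab.com
  repository: gitlab-org/gitlab-runner/gitlab-runner-helper
  identifier: arm64-v18.1.0   # tag (or digest) portion used by {{imageData.identifier}}
```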
+ } + ], + "resolution": { + "summary": "ok, can you please log an issue to track the doc update?\n\nAdding the test scenario:\n>My expectation is that if try to create a loadbalancer service without the specific annotation it should be denied. Only if LB service has exactly this annotation it should be allowed\n>```\n>service.beta.kubernetes.io/azure-load-balancer-internal: \"true\"\n>```\n>In terms of AKS cluster lb service with this annotatio will create internal Lb in azure otherwise it will be public\n\nAdding the policy:\n```\napiVersion: kyverno.io/v1\nkind: ClusterPolicy\nmetadata:\n annotations:\n meta.helm.sh/release-name: kyverno-policies \n meta.helm.sh/release-namespace: kyverno\n pod-policies.kyverno.io/autogen-controllers: none\n labels:\n app.kubernetes.io/managed-by: Helm\n name: if-baltic-restrict-external-load-balancer\nspec:\n background: true\n rules:\n - match:\n resources:\n kinds:\n - Service\n name: match-service-type\n preconditions:\n - key: '{{request.object.spec.type}}'\n operator: Equals\n value: LoadBalancer\n validate:\n deny:\n conditions:\n - key: \"{{ request.object.metadata.annotations.\\\"service.beta.kubernetes.io/azure-load-balancer-internal\\\"}}\"\n operator: NotEquals\n value: \"true\"\n validationFailureAction: enforce\n```", + "codeSnippets": [ + "apiVersion: kyverno.io/v1\r\nkind: ClusterPolicy\r\nmetadata:\r\n name: set-service-labels\r\n annotations:\r\n pod-policies.kyverno.io/autogen-controllers: none\r\nspec:\r\n background: false\r\n rules:\r\n - name: set-service-labels-pods\r\n match:\r\n resources:\r\n kinds:\r\n - Pod\r\n exclude:\r\n resources:\r\n namespaces:\r\n - \"kube*\"\r\n - \"openshift*\"\r\n - \"kube-*\"\r\n - \"openshift-*\"\r\n preconditions:\r\n any:", + "apiVersion: kyverno.io/v1\r\nkind: ClusterPolicy\r\nmetadata:\r\n name: set-service-labels\r\n annotations:\r\n pod-policies.kyverno.io/autogen-controllers: none\r\nspec:\r\n background: false\r\n rules:\r\n - name: set-service-labels-pods\r\n match:\r\n resources:\r\n kinds:\r\n - Pod\r\n exclude:\r\n resources:\r\n namespaces:\r\n - \"kube*\"\r\n - \"openshift*\"\r\n - \"kube-*\"\r\n - \"openshift-*\"\r\n preconditions:\r\n any:\r\n - key: \"{{ request.object.metadata.labels.serviceOverride }}\"\r\n operator: NotEquals\r\n value: \"*?\"\r\n mutate:\r\n patchStrategicMerge:\r\n metadata:\r\n labels:\r\n +(service): \"{{ request.object.metadata.labels.app }}\"\r\n spec:\r\n containers:\r\n - (name): \"*\"\r\n env:\r\n - name: \"SERVICE\"\r\n value: \"{{ request.object.metadata.labels.app }}\"\r\n - name: set-service-labels-deployments-and-sets\r\n match:\r\n resources:\r\n kinds:\r\n - Deployment\r\n - DaemonSet\r\n - StatefulSet\r\n exclude:\r\n resources:\r\n namespaces:\r\n - \"kube*\"\r\n - \"openshift*\"\r\n - \"kube-*\"\r\n - \"openshift-*\"\r\n preconditions:\r\n any:\r\n - key: \"{{ request.object.metadata.labels.serviceOverride }}\"\r\n operator: NotEquals\r\n value: \"*?\"\r\n mutate:\r\n patchStrategicMerge:\r\n spec:\r\n template:\r\n metadata:\r\n labels:\r\n +(service): \"{{ request.object.spec.template.metadata.labels.app }}\"\r\n spec:\r\n containers:\r\n - (name): \"*\"\r\n env:\r\n - name: \"SERVICE\"\r\n value: \"{{ request.object.spec.template.metadata.labels.app }}\"", + "kind: Deployment\r\napiVersion: apps/v1\r\nmetadata:\r\n name: audit-deployment\r\n labels:\r\n app: audit\r\n name: audit\r\n service: audit\r\nspec:\r\n replicas: 1\r\n selector:\r\n matchLabels:\r\n app: audit\r\n template:\r\n metadata:\r\n creationTimestamp: null\r\n labels:\r\n 
app: audit\r\n service: audit\r\n spec:\r\n containers:\r\n - name: audit\r\n image: busybox\r\n resources:\r\n limits:\r\n cpu: 2500m\r\n memory: 3584Mi\r\n requests:\r\n cpu: 1500m\r\n memory: 2Gi\r\n imagePullPolicy: Always\r\n restartPolicy: Always\r\n terminationGracePeriodSeconds: 30\r\n dnsPolicy: ClusterFirst\r\n securityContext: {}", + "---\r\napiVersion: kyverno.io/v1\r\nkind: ClusterPolicy\r\nmetadata:\r\n name: set-service-labels\r\n annotations:\r\n pod-policies.kyverno.io/autogen-controllers: none\r\nspec:\r\n background: false\r\n rules:\r\n - name: set-service-labels-pods\r\n match:\r\n resources:\r\n kinds:\r\n - Pod\r\n exclude:\r\n resources:\r\n namespaces:\r\n - \"kube*\"\r\n - \"openshift*\"\r\n - \"kube-*\"\r\n - \"openshift-*\"\r\n preconditions:\r\n any:\r\n - key: \"{{ request.object.metadata.labels.serviceOverride }}\"\r\n operator: NotEquals\r\n value: \"*\"\r\n mutate:\r\n patchStrategicMerge:\r\n metadata:\r\n labels:\r\n +(service): \"{{ request.object.metadata.labels.app }}\"\r\n spec:\r\n containers:\r\n - (name): \"*\"\r\n env:\r\n - name: \"SERVICE\"\r\n value: \"{{ request.object.metadata.labels.app }}\"\r\n - name: set-service-labels-deployments-and-sets\r\n match:\r\n resources:\r\n kinds:\r\n - Deployment\r\n - DaemonSet\r\n - StatefulSet\r\n exclude:\r\n resources:\r\n namespaces:\r\n - \"kube*\"\r\n - \"openshift*\"\r\n - \"kube-*\"\r\n - \"openshift-*\"\r\n preconditions:\r\n any:\r\n - key: \"{{ request.object.metadata.labels.serviceOverride }}\"\r\n operator: NotEquals\r\n value: \"*?\"\r\n mutate:\r\n patchStrategicMerge:\r\n spec:\r\n template:\r\n metadata:\r\n labels:\r\n +(service): \"{{ request.object.spec.template.metadata.labels.app }}\"\r\n spec:\r\n containers:\r\n - (name): \"*\"\r\n env:\r\n - name: \"SERVICE\"\r\n value: \"{{ request.object.spec.template.metadata.labels.app }}\"", + "apiVersion: apps/v1\r\nkind: Deployment\r\nmetadata:\r\n name: nginx-deployment\r\n labels:\r\n app: nginx\r\nspec:\r\n replicas: 3\r\n selector:\r\n matchLabels:\r\n app: nginx\r\n template:\r\n metadata:\r\n labels:\r\n app: nginx\r\n spec:\r\n containers:\r\n - name: nginx\r\n image: nginx:1.14.2\r\n ports:\r\n - containerPort: 80" + ] + } + }, + "metadata": { + "tags": [ + "kyverno", + "incubating", + "security", + "troubleshoot" + ], + "cncfProjects": [ + "kyverno" + ], + "targetResourceKinds": [ + "Pod", + "Deployment", + "Service", + "Statefulset", + "Daemonset", + "Namespace" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/kyverno/kyverno/pull/1930", + "repo": "https://github.com/kyverno/kyverno" + }, + "reactions": 2, + "comments": 10, + "synthesizedBy": "regex", + "qualityScore": 64 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with kyverno installed or the issue environment reproducible." 
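As a concrete companion to the AKS load-balancer scenario quoted in the resolution above, a minimal Service that the `if-baltic-restrict-external-load-balancer` policy would allow might look like this (the Service name, selector, and ports are illustrative assumptions):

```yaml
# Illustrative internal LoadBalancer Service: the annotation checked by the
# deny condition is present and set to "true", so the request is not denied.
apiVersion: v1
kind: Service
metadata:
  name: demo-internal-lb
  annotations:
    service.beta.kubernetes.io/azure-load-balancer-internal: "true"
spec:
  type: LoadBalancer
  selector:
    app: demo
  ports:
    - port: 80
      targetPort: 8080
```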
+ }, + "security": { + "scannedAt": "2026-03-02T19:38:16.285Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kyverno/kyverno-4199-fix-updaterequest-labeling.json b/solutions/cncf-generated/kyverno/kyverno-4199-fix-updaterequest-labeling.json new file mode 100644 index 00000000..9aed09f6 --- /dev/null +++ b/solutions/cncf-generated/kyverno/kyverno-4199-fix-updaterequest-labeling.json @@ -0,0 +1,83 @@ +{ + "version": "kc-mission-v1", + "name": "kyverno-4199-fix-updaterequest-labeling", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "kyverno: Fix UpdateRequest labeling", + "description": "## Explanation\n\nThis PR is to FIX Update Request (UR) labeling for creation and query usage, this was patched but the problem still exist (#4104), and to FIX kyverno keeps processing completed update requests (#4152)\n\nIn Kyverno v1.7.0 and v1.71, namespaced Policy has a create error when generateExistingOnPolicyUpdate: true is used, because label error, the UR resource can't be created. And with ClusterPolicy, when generateExistingOnPolicyUpdate: true is used, kyverno keeps processing complete", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "## Explanation\n\nThis PR is to FIX Update Request (UR) labeling for creation and query usage, this was patched but the problem still exist (#4104), and to FIX kyverno keeps processing completed update " + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\napiVersion: kyverno.io/v1\r\nkind: ClusterPolicy\r\nmetadata:\r\n name: sync-spot-controller-data\r\n annotations:\r\n policies.kyverno.io/title: Sync Spot Controller Data\r\n policies.kyverno.io/category: RightSizing\r\n policies.kyverno.io/subject: Spot.io\r\n policies.kyverno.io/minversion: 1.7.0\r\n policies.kyverno.io/description: >-\r\n Sync Secret and Configmap from kube-system namespace to cloud-services-system.\r\n Those objects are required to run spot-io-right-size-cm-update, to \n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "# [Codecov](https://codecov.io/gh/kyverno/kyverno/pull/4199?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None) Report\n> Merging [#4199](https://codecov.io/gh/kyverno/kyverno/pull/4199?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None) (8874e09) into [release-1.7](https://codecov.io/gh/kyverno/kyverno/commit/531355adce8924343630e2f60bdc8679c0a1c5dd?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None) (531355a) will **not change** coverage.\n> The diff coverage is `n/a`.\n\n```diff\n@@ Coverage Diff @@\n## release-1.7 #4199 +/- ##\n============================================\n Coverage 28.58% 28.58% \n============================================\n Files 143 143 \n Lines 19280 19280 \n============================================\n Hits 5512 5512 \n Misses 13096 13096 \n Partials 672 672 \n```\n\n------\n\n[Continue to review full report at Codecov](https://codecov.io/gh/kyverno/kyverno/pull/4199?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None).\n> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta?utm_medium=referral&utm_source=github&ut", + "codeSnippets": [ + "apiVersion: kyverno.io/v1\r\nkind: ClusterPolicy\r\nmetadata:\r\n name: sync-spot-controller-data\r\n annotations:\r\n policies.kyverno.io/title: Sync Spot Controller Data\r\n policies.kyverno.io/category: RightSizing\r\n policies.kyverno.io/subject: Spot.io\r\n policies.kyverno.io/minversion: 1.7.0\r\n policies.kyverno.io/description: >-\r\n Sync Secret and Configmap from kube-system namespace to cloud-services-system.\r\n Those objects are required to run spot-io-right-size-cm-update, to", + "apiVersion: kyverno.io/v1\r\nkind: ClusterPolicy\r\nmetadata:\r\n name: sync-spot-controller-data\r\n annotations:\r\n policies.kyverno.io/title: Sync Spot Controller Data\r\n policies.kyverno.io/category: RightSizing\r\n policies.kyverno.io/subject: Spot.io\r\n policies.kyverno.io/minversion: 1.7.0\r\n policies.kyverno.io/description: >-\r\n Sync Secret and Configmap from kube-system namespace to cloud-services-system.\r\n Those objects are required to run spot-io-right-size-cm-update, to get spot\r\n recommendations and create/update configmaps with name -rightsize.\r\nspec:\r\n failurePolicy: Ignore\r\n generateExistingOnPolicyUpdate: true\r\n rules:\r\n - name: sync-spot-controller-secret\r\n match:\r\n all:\r\n - resources:\r\n kinds:\r\n - CronJob\r\n names:\r\n - spot-io-right-size-cm-update\r\n namespaces:\r\n - cloud-services-system\r\n generate:\r\n apiVersion: v1\r\n kind: Secret\r\n name: spotinst-kubernetes-cluster-controller\r\n namespace: cloud-services-system\r\n synchronize: true\r\n clone:\r\n namespace: kube-system\r\n name: spotinst-kubernetes-cluster-controller\r\n - name: sync-spot-controller-configmap\r\n match:\r\n all:\r\n - resources:\r\n kinds:\r\n - CronJob\r\n names:\r\n - spot-io-right-size-cm-update\r\n namespaces:\r\n - cloud-services-system\r\n generate:\r\n apiVersion: v1\r\n kind: ConfigMap\r\n name: spotinst-kubernetes-cluster-controller-config\r\n namespace: cloud-services-system\r\n synchronize: true\r\n clone:\r\n namespace: kube-system\r\n name: spotinst-kubernetes-cluster-controller-config", + "apiVersion: kyverno.io/v1\r\nkind: Policy\r\nmetadata:\r\n name: sync-spot-controller-data\r\n 
annotations:\r\n policies.kyverno.io/title: Sync Spot Controller Data\r\n policies.kyverno.io/category: RightSizing\r\n policies.kyverno.io/subject: Spot.io\r\n policies.kyverno.io/minversion: 1.7.0\r\n policies.kyverno.io/description: >-\r\n Sync Secret and Configmap from kube-system namespace to cloud-services-system.\r\n Those objects are required to run spot-io-right-size-cm-update, to get spot\r\n recommendations and create/update configmaps with name -rightsize.\r\nspec:\r\n failurePolicy: Ignore\r\n generateExistingOnPolicyUpdate: true\r\n rules:\r\n - name: sync-spot-controller-secret\r\n match:\r\n all:\r\n - resources:\r\n kinds:\r\n - CronJob\r\n names:\r\n - spot-io-right-size-cm-update\r\n generate:\r\n apiVersion: v1\r\n kind: Secret\r\n name: spotinst-kubernetes-cluster-controller\r\n namespace: cloud-services-system\r\n synchronize: true\r\n clone:\r\n namespace: kube-system\r\n name: spotinst-kubernetes-cluster-controller\r\n - name: sync-spot-controller-configmap\r\n match:\r\n all:\r\n - resources:\r\n kinds:\r\n - CronJob\r\n names:\r\n - spot-io-right-size-cm-update\r\n generate:\r\n apiVersion: v1\r\n kind: ConfigMap\r\n name: spotinst-kubernetes-cluster-controller-config\r\n namespace: cloud-services-system\r\n synchronize: true\r\n clone:\r\n namespace: kube-system\r\n name: spotinst-kubernetes-cluster-controller-config" + ] + } + }, + "metadata": { + "tags": [ + "kyverno", + "incubating", + "security", + "troubleshoot", + "cherry-pick-required", + "cherry-pick-completed" + ], + "cncfProjects": [ + "kyverno" + ], + "targetResourceKinds": [ + "Deployment", + "Service", + "Configmap", + "Secret", + "Job", + "Cronjob", + "Namespace" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/kyverno/kyverno/pull/4199", + "repo": "https://github.com/kyverno/kyverno" + }, + "reactions": 5, + "comments": 7, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with kyverno installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:38:09.897Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kyverno/kyverno-4461-feat-use-standard-selector-for-validationfailureactionoverrides.json b/solutions/cncf-generated/kyverno/kyverno-4461-feat-use-standard-selector-for-validationfailureactionoverrides.json new file mode 100644 index 00000000..30d52c86 --- /dev/null +++ b/solutions/cncf-generated/kyverno/kyverno-4461-feat-use-standard-selector-for-validationfailureactionoverrides.json @@ -0,0 +1,79 @@ +{ + "version": "kc-mission-v1", + "name": "kyverno-4461-feat-use-standard-selector-for-validationfailureactionoverrides", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "kyverno: feat: Use standard selector for validationFailureActionOverrides", + "description": "## Explanation\n\nThis PR attempts to resolve https://github.com/kyverno/kyverno/issues/4254.\n\nModify ValidationFailureActionOverrides\n- Add `NamespaceSelector`\n- Generate relative manifests\n- Implement namespace labels matching logic in engineResponse\n- Add test cases\n\n## Related issue\n\nhttps://github.com/kyverno/kyverno/issues/4254\n\n## Milestone of this PR\n\n## Proposed Changes\n\nConvert `ValidationFailureActionOverrides` spec:\n- Add `NamespaceSelector` ([k8s/metav1.LabelSelector](https://pkg.go.d", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "## Explanation\n\nThis PR attempts to resolve https://github.com/kyverno/kyverno/issues/4254.\n\nModify ValidationFailureActionOverrides\n- Add `NamespaceSelector`\n- Generate relative manifests\n- Implement" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nvalidationFailureActionOverrides:\r\n - action: audit # Action to apply\r\n namespaces:\r\n - dev\r\n namespaceSelector: # List of affected namespaces\r\n matchExpressions:\r\n - key: \"kubernetes.io/metadata.name\"\r\n operator: In\r\n values:\r\n - \"default\"\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "Yes, validating selectors seems impossible to me.\nFirst, because it depends on the data in the cluster but validation happens only once at admission time.\nBecause multiple selectors can be totally unrelated and still match a given namespace.\n\nIt looks like we need more work to specify the expected behaviour here.\nTBH I wouldn't focus too much on validation but instead on the expected behaviour.\n\n> One solution could be to take the first match but this can be confusing.\n\nI don't think it's a smart idea.\nMaybe we can give priority to `enforce` in case the namespace matches both `audit` and `enforce` ?\n\nIf `namespaces` and `namespaceSelector` are both defined I would combine them with an AND (combining with an OR is possible by defining two entries).", + "codeSnippets": [ + "validationFailureActionOverrides:\r\n - action: audit # Action to apply\r\n namespaces:\r\n - dev\r\n namespaceSelector: # List of affected namespaces\r\n matchExpressions:\r\n - key: \"kubernetes.io/metadata.name\"\r\n operator: In\r\n values:\r\n - \"default\"", + "validationFailureActionOverrides:\r\n - action: audit # Action to apply\r\n namespaces:\r\n - dev\r\n namespaceSelector: # List of affected namespaces\r\n matchExpressions:\r\n - key: \"kubernetes.io/metadata.name\"\r\n operator: In\r\n values:\r\n - \"default\"", + "apiVersion: v1\r\nkind: ConfigMap\r\nmetadata:\r\n name: roles-dictionary\r\n namespace: default\r\ndata:\r\n allowed-roles: \"[\\\"cluster-admin\\\", \\\"cluster-operator\\\", \\\"tenant-admin\\\"]\"", + "name: prepend-image-registry\r\npolicies:\r\n - prepend_image_registry.yaml\r\nresources:\r\n - resource.yaml\r\nvariables: values.yaml\r\nresults:\r\n - policy: prepend-registry\r\n rule: prepend-registry-containers\r\n resource: mypod\r\n # if mutate rule\r\n patchedResource: patchedResource01.yaml\r\n kind: Pod\r\n result: pass" + ] + } + }, + "metadata": { + "tags": [ + "kyverno", + "incubating", + "security", + "troubleshoot" + ], + "cncfProjects": [ + "kyverno" + ], + "targetResourceKinds": [ + "Pod", + "Configmap", + "Namespace", + "Role" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/kyverno/kyverno/pull/4461", + "repo": "https://github.com/kyverno/kyverno" + }, + "reactions": 2, + "comments": 27, + "synthesizedBy": "regex", + "qualityScore": 71 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with kyverno installed or the issue environment reproducible." 
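The override snippet above is only a fragment. As a hedged sketch of where it would sit in a full policy under the schema proposed by this PR (the rule itself and all names are illustrative assumptions, not taken from the source):

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: require-team-label          # hypothetical policy
spec:
  validationFailureAction: enforce
  validationFailureActionOverrides:
    - action: audit                 # relax enforcement for matching namespaces
      namespaces:
        - dev
      namespaceSelector:            # field added by this PR
        matchExpressions:
          - key: kubernetes.io/metadata.name
            operator: In
            values:
              - default
  rules:
    - name: check-team-label
      match:
        any:
          - resources:
              kinds:
                - Pod
      validate:
        message: "The label `team` is required."
        pattern:
          metadata:
            labels:
              team: "?*"
```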
+ }, + "security": { + "scannedAt": "2026-03-02T19:38:12.647Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/kyverno/kyverno-6248-extends-foreach-to-cover-generate-rules.json b/solutions/cncf-generated/kyverno/kyverno-6248-extends-foreach-to-cover-generate-rules.json new file mode 100644 index 00000000..245b9abc --- /dev/null +++ b/solutions/cncf-generated/kyverno/kyverno-6248-extends-foreach-to-cover-generate-rules.json @@ -0,0 +1,87 @@ +{ + "version": "kc-mission-v1", + "name": "kyverno-6248-extends-foreach-to-cover-generate-rules", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "kyverno: Extends foreach to cover generate rules", + "description": "## Explanation\nExtends foreach to cover generate rules.\n\n## Related issue\n\n## Milestone of this PR\n\nDone 👍🏽\n\n## Proposed Changes\nAfter merging this PR, A single \"trigger\" resource will result in the creation of multiple downstream resources for an example Creation of a pod could result in generation of multiple downstream resources like configMap, secret etc.\n\n### Proof Manifests\nIf the pollicy given below is applied then multiple configMap will get created on the creation of a pod\n```yaml\napi", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "## Explanation\nExtends foreach to cover generate rules.\n\n## Related issue\n\n## Milestone of this PR\n\nDone 👍🏽\n\n## Proposed Changes\nAfter merging this PR, A single \"trigger\" resource will result in the" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\napiVersion: kyverno.io/v1\r\nkind: ClusterPolicy\r\nmetadata:\r\n name: basic-policy\r\nspec:\r\n rules:\r\n - name: test\r\n match:\r\n any:\r\n - resources:\r\n kinds:\r\n - Pod\r\n generate:\r\n foreach:\r\n - list: \"request.object.spec.containers\"\r\n generateResources:\r\n - kind: ConfigMap\r\n apiVersion: v1\r\n name: \"custom-created-configmap\"\r\n namespace: \"{{request.object.metadata.namespace}}\"\r\n \n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kyverno/kyverno/pull/4386. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "## Explanation\n\nExtends foreach to cover generate rules.\n\n## Related issue\n\n## Milestone of this PR\nDone 👍🏽 \n\n## Proposed Changes\n\nAfter merging this PR, A single \"trigger\" resource will result in the creation of multiple downstream resources for an example Creation of a pod could result in generation of multiple configMap.\n\n### Proof Manifests\n\nIf the pollicy given below is applied then multiple configMap will get created on the creation of a pod:\n\n```yaml\napiVersion: kyverno.io/v1\nkind: ClusterPolicy\nmetadata:\n name: basic-policy2\nspec:\n rules:\n - name: test-foreach-generate\n match:\n any:\n - resources:\n kinds:\n - Pod\n generate:\n foreach:\n - list: \"request.object.spec.containers\"\n kind: ConfigMap\n apiVersion: v1\n name: \"custom-created-configmap\"\n namespace: \"{{request.object.metadata.namespace}}\"\n synchronize: false\n data:\n data:\n foo: \"bar is my container name\"\n - list: \"request.object.spec.containers\"\n kind: ConfigMap\n apiVersion: v1\n name: \"{{element.name}}-config\"\n namespace: \"{{request.object.metadata.namespace}}\"\n synchronize: false\n data:\n data:\n foo: \"{{element.name}} is my container name\"\n - list: \"request.object.spec.containers\"\n apiVersion: v1\n kind: ConfigMap\n name: \"foreachgen-test\"\n namespace: \"default", + "codeSnippets": [ + "apiVersion: kyverno.io/v1\r\nkind: ClusterPolicy\r\nmetadata:\r\n name: basic-policy\r\nspec:\r\n rules:\r\n - name: test\r\n match:\r\n any:\r\n - resources:\r\n kinds:\r\n - Pod\r\n generate:\r\n foreach:\r\n - list: \"request.object.spec.containers\"\r\n generateResources:\r\n - kind: ConfigMap\r\n apiVersion: v1\r\n name: \"custom-created-configmap\"\r\n namespace: \"{{request.object.metadata.namespace}}\"", + "apiVersion: kyverno.io/v1\r\nkind: ClusterPolicy\r\nmetadata:\r\n name: basic-policy\r\nspec:\r\n rules:\r\n - name: test\r\n match:\r\n any:\r\n - resources:\r\n kinds:\r\n - Pod\r\n generate:\r\n foreach:\r\n - list: \"request.object.spec.containers\"\r\n generateResources:\r\n - kind: ConfigMap\r\n apiVersion: v1\r\n name: \"custom-created-configmap\"\r\n namespace: \"{{request.object.metadata.namespace}}\"\r\n synchronize: false\r\n data:\r\n data:\r\n foo: \"{{element.name}} is my container name\"\r\n - kind: Secret\r\n apiVersion: v1\r\n name: \"custom-created-secret\"\r\n namespace: \"{{request.object.metadata.namespace}}\"\r\n synchronize: false\r\n data:\r\n data:\r\n extra: YmFyCg==", + "apiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n name: static-web\r\n labels:\r\n role: myrole\r\nspec:\r\n containers:\r\n - name: web\r\n image: nginx\r\n ports:\r\n - name: web\r\n containerPort: 80\r\n protocol: TCP", + "apiVersion: kyverno.io/v1\r\nkind: ClusterPolicy\r\nmetadata:\r\n name: basic-policy-cloned\r\nspec:\r\n rules:\r\n - name: test\r\n match:\r\n any:\r\n - resources:\r\n kinds:\r\n - Pod\r\n generate:\r\n foreach:\r\n - list: \"request.object.spec.containers\"\r\n generateResources:\r\n - kind: ConfigMap\r\n apiVersion: v1\r\n name: \"{{element.name}}\"\r\n namespace: \"{{request.object.metadata.namespace}}\"\r\n synchronize: false\r\n clone:\r\n namespace: default\r\n name: custom-created-configmap\r\n - kind: Secret\r\n apiVersion: v1\r\n name: \"custom-created-secret-1\"\r\n namespace: \"{{request.object.metadata.namespace}}\"\r\n synchronize: false\r\n clone:\r\n namespace: default\r\n name: custom-created-secret", + "apiVersion: v1\r\nkind: ConfigMap\r\nmetadata:\r\n name: roles-dictionary\r\n namespace: 
default\r\ndata:\r\n allowed-roles: \"[\\\"cluster-admin\\\", \\\"cluster-operator\\\", \\\"tenant-admin\\\"]\"" + ] + } + }, + "metadata": { + "tags": [ + "kyverno", + "incubating", + "security", + "troubleshoot", + "stale" + ], + "cncfProjects": [ + "kyverno" + ], + "targetResourceKinds": [ + "Pod", + "Configmap", + "Secret", + "Namespace", + "Role" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/kyverno/kyverno/pull/6248", + "repo": "https://github.com/kyverno/kyverno", + "pr": "https://github.com/kyverno/kyverno/pull/4386" + }, + "reactions": 2, + "comments": 32, + "synthesizedBy": "regex", + "qualityScore": 62 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with kyverno installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:38:15.066Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/lima/lima-188-replace-reverse-sshfs-with-samba.json b/solutions/cncf-generated/lima/lima-188-replace-reverse-sshfs-with-samba.json new file mode 100644 index 00000000..076c3b93 --- /dev/null +++ b/solutions/cncf-generated/lima/lima-188-replace-reverse-sshfs-with-samba.json @@ -0,0 +1,79 @@ +{ + "version": "kc-mission-v1", + "name": "lima-188-replace-reverse-sshfs-with-samba", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "lima: Replace reverse SSHFS with Samba", + "description": "🔴 Current blocker: nls_utf8.ko is missing in openSUSE 15.3 https://bugzilla.opensuse.org/show_bug.cgi?id=1190797\n🔴 I also have to rewrite this PR to make Samba non-default (https://github.com/lima-vm/lima/pull/188#discussion_r718677030)\n\n- - -\nReplace previous PR #118\nFix #20 (`Filesystem sharing`)\n\nSee the changes of `docs/internal.md` for the design.\n\n- On macOS hosts, `/usr/local/sbin/samba-dot-org-smbd` is used as the `smbd` binary.\n This binary can be installed with `brew install samba`", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "🔴 Current blocker: nls_utf8.ko is missing in openSUSE 15.3 https://bugzilla.opensuse.org/show_bug.cgi?id=1190797\n🔴 I also have to rewrite this PR to make Samba non-default (https://github.com/lima-v" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n[2021/07/21 18:47:12.871531, 3] ../../lib/util/util_net.c:257(interpret_string_addr_internal)\r\n interpret_string_addr_internal: getaddrinfo failed for name suda-mbp.local (flags 1026) [nodename nor servname provided, or not known]\r\n[2021/07/21 18:47:12.871626, 3] ../../source3/lib/util_sock.c:1026(get_mydnsfullname)\r\n get_mydnsfullname: getaddrinfo failed for name suda-mbp.local [Unknown error]\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/lima-vm/lima/pull/118. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Sambas are not automatically mounted yet. 
`mount -t cifs -o credentials=/tmp/credentials //192.168.5.4/lima-0 /mnt/tmp-0`\n\nFIXME: mount takes 25 secs, due to hostname resolution errors\n```\n[2021/07/21 18:47:12.871531, 3] ../../lib/util/util_net.c:257(interpret_string_addr_internal)\n interpret_string_addr_internal: getaddrinfo failed for name suda-mbp.local (flags 1026) [nodename nor servname provided, or not known]\n[2021/07/21 18:47:12.871626, 3] ../../source3/lib/util_sock.c:1026(get_mydnsfullname)\n get_mydnsfullname: getaddrinfo failed for name suda-mbp.local [Unknown error]\n```\n\nA workaround is to add `127.0.0.1 localhost suda-mbp.local` to `/etc/hosts` on the host, but that requires sudo.\n\n- - -\n\nTODOs:\n- supply the credential to the guest\n - via serial? via ISO?\n - Using an ACPI table is not an option, because ACPI is N/A for qemu-system-aarch64\n\n- auto mount samba mounts\n\n- remove sshfs\n\n- decrease debug level", + "codeSnippets": [ + "[2021/07/21 18:47:12.871531, 3] ../../lib/util/util_net.c:257(interpret_string_addr_internal)\r\n interpret_string_addr_internal: getaddrinfo failed for name suda-mbp.local (flags 1026) [nodename nor servname provided, or not known]\r\n[2021/07/21 18:47:12.871626, 3] ../../source3/lib/util_sock.c:1026(get_mydnsfullname)\r\n get_mydnsfullname: getaddrinfo failed for name suda-mbp.local [Unknown error]", + "[2021/07/21 18:47:12.871531, 3] ../../lib/util/util_net.c:257(interpret_string_addr_internal)\r\n interpret_string_addr_internal: getaddrinfo failed for name suda-mbp.local (flags 1026) [nodename nor servname provided, or not known]\r\n[2021/07/21 18:47:12.871626, 3] ../../source3/lib/util_sock.c:1026(get_mydnsfullname)\r\n get_mydnsfullname: getaddrinfo failed for name suda-mbp.local [Unknown error]", + "/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/share/man/man8/mount_9p.8\r\n/Library/Developer/CommandLineTools/SDKs/MacOSX10.14.sdk/usr/share/man/man8/mount_9p.8\r\n/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/share/man/man8/mount_9p.8\r\n/Library/Developer/CommandLineTools/SDKs/MacOSX11.1.sdk/usr/share/man/man8/mount_9p.8\r\n/Library/Developer/CommandLineTools/SDKs/MacOSX11.3.sdk/usr/share/man/man8/mount_9p.8\r\n/sbin/mount_9p\r\n/usr/share/man/man8/mount_9p.8" + ] + } + }, + "metadata": { + "tags": [ + "lima", + "incubating", + "app-definition", + "troubleshoot", + "impact-changelog" + ], + "cncfProjects": [ + "lima" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/lima-vm/lima/pull/188", + "repo": "https://github.com/lima-vm/lima", + "pr": "https://github.com/lima-vm/lima/pull/118" + }, + "reactions": 2, + "comments": 21, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with lima installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:38:33.582Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/lima/lima-2306-add-command-to-generate-jsonschema-for-limayaml.json b/solutions/cncf-generated/lima/lima-2306-add-command-to-generate-jsonschema-for-limayaml.json new file mode 100644 index 00000000..83a34ae2 --- /dev/null +++ b/solutions/cncf-generated/lima/lima-2306-add-command-to-generate-jsonschema-for-limayaml.json @@ -0,0 +1,80 @@ +{ + "version": "kc-mission-v1", + "name": "lima-2306-add-command-to-generate-jsonschema-for-limayaml", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "lima: Add command to generate jsonschema for limayaml", + "description": "https://pypi.org/project/check-jsonschema/\n\nActually found two bugs, with the current code:\n\n```\nSchema validation errors were encountered.\n examples/default.yaml::$.vmType: None is not of type 'string'\n examples/default.yaml::$.os: None is not of type 'string'\n examples/default.yaml::$.arch: None is not of type 'string'\n examples/default.yaml::$.cpuType.armv7l: None is not of type 'string'\n examples/default.yaml::$.cpuType.aarch64: None is not of type 'string'\n examples/default.yaml::$.cp", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "https://pypi.org/project/check-jsonschema/\n\nActually found two bugs, with the current code:\n\n```\nSchema validation errors were encountered.\n examples/default.yaml::$.vmType: None is not of type 'stri" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nSchema validation errors were encountered.\r\n examples/default.yaml::$.vmType: None is not of type 'string'\r\n examples/default.yaml::$.os: None is not of type 'string'\r\n examples/default.yaml::$.arch: None is not of type 'string'\r\n examples/default.yaml::$.cpuType.armv7l: None is not of type 'string'\r\n examples/default.yaml::$.cpuType.aarch64: None is not of type 'string'\r\n examples/default.yaml::$.cpuType.x86_64: None is not of type 'string'\r\n examples/default.yaml::$.cpus: None is not of\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/lima-vm/lima/pull/2333. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "Avoid issues with converting null strings in a map value\n\nChange order to alphabetical, to match the \"limactl info\"\n\n----\n\nHelps with:\n\n* https://github.com/lima-vm/lima/pull/1069\n\n* https://github.com/lima-vm/lima/pull/2306", + "codeSnippets": [ + "Schema validation errors were encountered.\r\n examples/default.yaml::$.vmType: None is not of type 'string'\r\n examples/default.yaml::$.os: None is not of type 'string'\r\n examples/default.yaml::$.arch: None is not of type 'string'\r\n examples/default.yaml::$.cpuType.armv7l: None is not of type 'string'\r\n examples/default.yaml::$.cpuType.aarch64: None is not of type 'string'\r\n examples/default.yaml::$.cpuType.x86_64: None is not of type 'string'\r\n examples/default.yaml::$.cpus: None is not of", + "Schema validation errors were encountered.\r\n examples/default.yaml::$.vmType: None is not of type 'string'\r\n examples/default.yaml::$.os: None is not of type 'string'\r\n examples/default.yaml::$.arch: None is not of type 'string'\r\n examples/default.yaml::$.cpuType.armv7l: None is not of type 'string'\r\n examples/default.yaml::$.cpuType.aarch64: None is not of type 'string'\r\n examples/default.yaml::$.cpuType.x86_64: None is not of type 'string'\r\n examples/default.yaml::$.cpus: None is not of type 'integer'\r\n examples/default.yaml::$.memory: None is not of type 'string'\r\n examples/default.yaml::$.disk: None is not of type 'string'", + "Schema validation errors were encountered.\r\n examples/docker.yaml::$.probes[0]: Additional properties are not allowed ('hint', 'script' were unexpected)\r\n examples/docker.yaml::$.probes[0]: 'Mode' is a required property\r\n examples/docker.yaml::$.probes[0]: 'Description' is a required property\r\n examples/docker.yaml::$.probes[0]: 'Script' is a required property\r\n examples/docker.yaml::$.probes[0]: 'Hint' is a required property", + "Schema validation errors were encountered.\r\n examples/default.yaml::$.additionalDisks: None is not of type 'array'\r\n examples/default.yaml::$.mounts[0].mountPoint: None is not of type 'string'\r\n examples/default.yaml::$.mounts[0].writable: None is not of type 'boolean'\r\n examples/default.yaml::$.mounts[0].sshfs.cache: None is not of type 'boolean'\r\n examples/default.yaml::$.mounts[0].sshfs.followSymlinks: None is not of type 'boolean'\r\n examples/default.yaml::$.mounts[0].sshfs.sftpDriver: None is not of type 'string'\r\n examples/default.yaml::$.mounts[0].9p.securityModel: None is not of type 'string'\r\n examples/default.yaml::$.mounts[0].9p.protocolVersion: None is not of type 'string'\r\n examples/default.yaml::$.mounts[0].9p.msize: None is not of type 'string'\r\n examples/default.yaml::$.mounts[0].9p.cache: None is not of type 'string'\r\n examples/default.yaml::$.mountType: None is not of type 'string'\r\n examples/default.yaml::$.mountInotify: None is not of type 'boolean'\r\n examples/default.yaml::$.ssh.loadDotSSHPubKeys: None is not of type 'boolean'\r\n examples/default.yaml::$.ssh.forwardAgent: None is not of type 'boolean'\r\n examples/default.yaml::$.ssh.forwardX11: None is not of type 'boolean'\r\n examples/default.yaml::$.ssh.forwardX11Trusted: None is not of type 'boolean'\r\n examples/default.yaml::$.firmware.legacyBIOS: None is not of type 'boolean'\r\n examples/default.yaml::$.audio.device: None is not of type 'string'\r\n examples/default.yaml::$.video.display: None is not of type 'string'\r\n examples/default.yaml::$.video.vnc.display: None is not of type 'string'\r\n 
examples/default.yaml::$.upgradePackages: None is not of type 'boolean'\r\n examples/default.yaml::$.containerd.system: None is not of type 'boolean'\r\n examples/default.yaml::$.containerd.user: None is not of type 'boolean'\r\n examples/default.yaml::$.guestInstallPrefix: None is not of type 'string'\r\n examples/default.yaml::$.networks: None is not of type 'array'\r\n examples/default.yaml::$.hostResolver.enabled: None is not of type 'boolean'\r\n examples/default.yaml::$.hostResolver.ipv6: None is not of type 'boolean'\r\n examples/default.yaml::$.hostResolver.hosts: None is not of type 'object'\r\n examples/default.yaml::$.propagateProxyEnv: None is not of type 'boolean'\r\n examples/default.yaml::$.caCerts.removeDefaults: None is not of type 'boolean'\r\n examples/default.yaml::$.caCerts.files: None is not of type 'array'\r\n examples/default.yaml::$.caCerts.certs: None is not of type 'array'\r\n examples/default.yaml::$.rosetta.enabled: None is not of type 'boolean'\r\n examples/default.yaml::$.rosetta.binfmt: None is not of type 'boolean'\r\n examples/default.yaml::$.plain: None is not of type 'boolean'\r\n examples/default.yaml::$.timezone: None is not of type 'string'", + "\"vmType\": {\r\n \"type\": \"string\"\r\n }," + ] + } + }, + "metadata": { + "tags": [ + "lima", + "incubating", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "lima" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/lima-vm/lima/pull/2306", + "repo": "https://github.com/lima-vm/lima", + "pr": "https://github.com/lima-vm/lima/pull/2333" + }, + "reactions": 3, + "comments": 12, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with lima installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:38:31.641Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/lima/lima-4595-support-macos-guests.json b/solutions/cncf-generated/lima/lima-4595-support-macos-guests.json new file mode 100644 index 00000000..908676cf --- /dev/null +++ b/solutions/cncf-generated/lima/lima-4595-support-macos-guests.json @@ -0,0 +1,76 @@ +{ + "version": "kc-mission-v1", + "name": "lima-4595-support-macos-guests", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "lima: Support macOS guests", + "description": "Usage:\n```\nlimactl create --video template:macos\nlimactl start macos\nlimactl shell macos\n```\n\nThe password prompt is shown during creating an instance, so as to run `chown root:wheel ~/.lima/_mnt/0/Library/LaunchDaemons/...`, which is required for the `lima-macos-init` launch daemon to run.\n\nThe password for GUI login is randomly generated and stored in `/Users/${USER}.guest/password` in the VM.\n\nFix #3618\n\nTODOs (in follow-up PRs):\n\n (`ch", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Usage:\n```\nlimactl create --video template:macos\nlimactl start macos\nlimactl shell macos\n```\n\nThe password prompt is shown during creating an instance, so as to run `chown root:wheel ~/.lima/_mnt/0/Li" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nlimactl create --video template:macos\r\nlimactl start macos\r\nlimactl shell macos\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "> That may help simplifying `FillDefault` with reflection in future?\n\nI don't think `FillDefaults()` will be implemented using reflection, but will use template embedding instead:\n\n* we need a way for custom URL schemes to return the data, not just a redirect (#4135)\n\n* define an `internal:` scheme that can return the following:\n - `internal:override`: contents of `$LIMA_HOME/_config/override.yaml` or empty string\n - `internal:default`: contents of `$LIMA_HOME/_config/default.yaml` or empty string\n - `internal:builtin`: builtin defaults as a LimaYAML config\n - `internal:user`: the template provided by the user\n\n* `limayaml.FillDefaults()` then just invokes `limatmpl.Embed()` on\n ```yaml\n base:\n - internal:override\n - internal:user\n - internal:default\n - internal:builtin\n ```\n\n* URL scheme need to be registered dynamically because `internal` needs to be a closure in order to implement `internal:user`.\n\nThere are some other challenges that I currently no longer remember, but this would re-use the existing code for combining templates instead of having a second implementation.", + "codeSnippets": [ + "limactl create --video template:macos\nlimactl start macos\nlimactl shell macos", + "limactl create --video template:macos\r\nlimactl start macos\r\nlimactl shell macos", + "limactl create --video template:macos\r\nlimactl start macos\r\nlimactl shell macos", + "base:\r\n - internal:override\r\n - internal:user\r\n - internal:default\r\n - internal:builtin" + ] + } + }, + "metadata": { + "tags": [ + "lima", + "incubating", + "app-definition", + "troubleshoot", + "impact-changelog", + "guest-macos" + ], + "cncfProjects": [ + "lima" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/lima-vm/lima/pull/4595", + "repo": "https://github.com/lima-vm/lima" + }, + "reactions": 5, + "comments": 16, + "synthesizedBy": "regex", + "qualityScore": 62 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with lima installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:38:28.935Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/linkerd-viz/linkerd-viz-13615-expose-issuer-certificate-ttl-as-a-prometheus-metric.json b/solutions/cncf-generated/linkerd-viz/linkerd-viz-13615-expose-issuer-certificate-ttl-as-a-prometheus-metric.json new file mode 100644 index 00000000..3bc06287 --- /dev/null +++ b/solutions/cncf-generated/linkerd-viz/linkerd-viz-13615-expose-issuer-certificate-ttl-as-a-prometheus-metric.json @@ -0,0 +1,81 @@ +{ + "version": "kc-mission-v1", + "name": "linkerd-viz-13615-expose-issuer-certificate-ttl-as-a-prometheus-metric", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "linkerd-viz: expose issuer certificate TTL as a prometheus metric", + "description": "Problem: There is currently no simple way to monitor the expiration time of the issuer certificate in use by linkerd; a surprising omission considering that issuer cert expiration will almost certainly cause visible cluster issues.\n\nSolution: \n\n- When a new issuer certificate is loaded, log its NotAfter time in unix epoch format, along with the current process wall clock time. 
The two timestamps are passed in via the logrus Fields pattern, allowing operators to easily pull these numbers from pod", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Problem: There is currently no simple way to monitor the expiration time of the issuer certificate in use by linkerd; a surprising omission considering that issuer cert expiration will almost certainl" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nSubject\r\n\r\nProblem\r\n\r\nSolution\r\n\r\nValidation\r\n\r\nFixes #[GitHub issue ID]\r\n\r\nDCO Sign off\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kubestellar/console-kb/pull/6. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "# CNCF Mission Generation Report (All Batches)\n\n**Date:** 2026-02-27T17:50:38Z\n**Batches:** 11\n\n---\n## Batch: generation-report-0.md\n# CNCF Mission Generation Report\n\n**Date:** 2026-02-27T17:47:59.472Z\n**Total generated:** 184\n**Skipped (duplicates):** 0\n**Errors:** 0\n\n## Projects Processed\n\n| Project | Maturity | Issues Found | Missions Generated | Errors |\n|---------|----------|-------------|-------------------|--------|\n| argo | graduated | 20 | 20 | 0 |\n| cert-manager | graduated | 17 | 17 | 0 |\n| cilium | graduated | 15 | 15 | 0 |\n| cloudevents | graduated | 3 | 3 | 0 |\n| containerd | graduated | 15 | 15 | 0 |\n| coredns | graduated | 6 | 6 | 0 |\n| cri-o | graduated | 14 | 14 | 0 |\n| crossplane | graduated | 8 | 8 | 0 |\n| cubefs | graduated | 0 | 0 | 0 |\n| dapr | graduated | 8 | 8 | 0 |\n| dragonfly | graduated | 0 | 0 | 0 |\n| envoy | graduated | 16 | 16 | 0 |\n| etcd | graduated | 12 | 12 | 0 |\n| falco | graduated | 9 | 9 | 0 |\n| fluentd | graduated | 2 | 2 | 0 |\n| flux | graduated | 0 | 0 | 0 |\n| harbor | graduated | 7 | 7 | 0 |\n| helm | graduated | 20 | 20 | 0 |\n| in-toto | graduated | 3 | 3 | 0 |\n| istio | graduated | 9 | 9 | 0 |\n\n## Generated Missions\n\n- **argo: feat: Multiple sources for applications** (advanced) — [source](https://github.com/argoproj/argo-cd/pull/10432)\n- **argo: feat: Extra Helm values from external git repo #5826** (advanced) — [source](https://github.com/argoproj/argo-cd/pull/6280)\n- **argo: feat(application-controller): Add support for rollback", + "codeSnippets": [ + "Subject\r\n\r\nProblem\r\n\r\nSolution\r\n\r\nValidation\r\n\r\nFixes #[GitHub issue ID]\r\n\r\nDCO Sign off", + "Subject\r\n\r\nProblem\r\n\r\nSolution\r\n\r\nValidation\r\n\r\nFixes #[GitHub issue ID]\r\n\r\nDCO Sign off", + "Introduce Pull Request Template\r\n\r\nGitHub's community guidelines recommend a pull request template, the repo was\r\nlacking one.\r\n\r\nIntroduce a `PULL_REQUEST_TEMPLATE.md` file.\r\n\r\nOnce merged, the\r\n[Community profile checklist](https://github.com/linkerd/linkerd2/community)\r\nshould indicate the repo now provides a pull request template.\r\n\r\nFixes #3321\r\n\r\nSigned-off-by: Jane Smith ", + "=== Skipped\r\n=== SKIP: viz/cmd TestRequestTapByResourceFromAPI/Should_return_error_if_stream_returned_error (0.00s)\r\n --- SKIP: TestRequestTapByResourceFromAPI/Should_return_error_if_stream_returned_error (0.00s)\r\n\r\nDONE 1063 tests, 1 skipped in 94.401s" + ] + } + }, + 
"metadata": { + "tags": [ + "linkerd-viz", + "graduated", + "observability", + "troubleshoot" + ], + "cncfProjects": [ + "linkerd-viz" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/linkerd/linkerd2/pull/13615", + "repo": "https://github.com/linkerd/linkerd2", + "pr": "https://github.com/kubestellar/console-kb/pull/6" + }, + "reactions": 5, + "comments": 11, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with linkerd-viz installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:35:42.780Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/linkerd-viz/linkerd-viz-2893-added-anti-affinity-when-ha-is-configured.json b/solutions/cncf-generated/linkerd-viz/linkerd-viz-2893-added-anti-affinity-when-ha-is-configured.json new file mode 100644 index 00000000..3670b066 --- /dev/null +++ b/solutions/cncf-generated/linkerd-viz/linkerd-viz-2893-added-anti-affinity-when-ha-is-configured.json @@ -0,0 +1,76 @@ +{ + "version": "kc-mission-v1", + "name": "linkerd-viz-2893-added-anti-affinity-when-ha-is-configured", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "linkerd-viz: Added Anti Affinity when HA is configured", + "description": "The following PR adds anti-affinity rules to `proxy-injector`, `sp-validator`, `linkerd-controller`, `tap` deployments.\n\nThe idea was to make anti-affinity rules both based on `kubernetes.io/hostname` and `failure-domain.beta.kubernetes.io/zone` **preferred** when only the the `--ha` flag is configured.\n\nif the `--required-host-anti-affinity` is also configured along with `--ha`, then the `kubernetes.io/hostname` is **required** while `failure-domain.beta.kubernetes.io/zone` is still **preferred", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "The following PR adds anti-affinity rules to `proxy-injector`, `sp-validator`, `linkerd-controller`, `tap` deployments.\n\nThe idea was to make anti-affinity rules both based on `kubernetes.io/hostname`" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nlinkerd-existence\r\n-----------------\r\n√ 'linkerd-config' config map exists\r\n√ control plane replica sets are ready\r\n× no unschedulable pods\r\n linkerd-controller-7d9bdd85b8-2sk7q: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-74vsr: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "> Defaulting to preferred feels like we’re introducing two types of HA, “kinda” HA and “real” HA.\n\n:+1: Agreed.\n\n@grampelberg @Pothulapati So to summarize,\n\n* Remove the `--required-host-anti-affinity` option\n* By default, HA mode uses `required` hostname anti-affinity and `preferred` zone anti-affinity (there will almost always be more replicas than zones, so `required` zone anti-affinity won't work.)\n\nIf the `required` hostname anti-affinity is causing an installation failure, `linkerd check` actually detects the problem and outputs the reason:\n\n```\nlinkerd-existence\n-----------------\n√ 'linkerd-config' config map exists\n√ control plane replica sets are ready\n× no unschedulable pods\n linkerd-controller-7d9bdd85b8-2sk7q: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\n linkerd-controller-7d9bdd85b8-74vsr: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\n linkerd-controller-7d9bdd85b8-97lds: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\n linkerd-controller-7d9bdd85b8-9lj9v: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\n linkerd-contro", + "codeSnippets": [ + "linkerd-existence\r\n-----------------\r\n√ 'linkerd-config' config map exists\r\n√ control plane replica sets are ready\r\n× no unschedulable pods\r\n linkerd-controller-7d9bdd85b8-2sk7q: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-74vsr: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy", + "linkerd-existence\r\n-----------------\r\n√ 'linkerd-config' config map exists\r\n√ control plane replica sets are ready\r\n× no unschedulable pods\r\n linkerd-controller-7d9bdd85b8-2sk7q: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-74vsr: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-97lds: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-9lj9v: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-9sx8d: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-kbzwn: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-lcgww: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 
node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-zmsn4: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-proxy-injector-76c4f5c7d9-98gm8: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-proxy-injector-76c4f5c7d9-w7jwc: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-sp-validator-6bc6cc666b-4k5zs: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-sp-validator-6bc6cc666b-qvkdb: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-tap-8688fdf4f-c5tgb: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-tap-8688fdf4f-wmlnz: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n see https://linkerd.io/checks/#l5d-existence-unschedulable-pods for hints", + "✗ k get no -L failure-domain.beta.kubernetes.io/zone\r\nNAME STATUS ROLES AGE VERSION ZONE\r\ngke-isim-dev-ha-default-pool-4b003e42-1m9b Ready 22h v1.13.7-gke.8 us-east1-b\r\ngke-isim-dev-ha-default-pool-4b003e42-p1ps Ready 22h v1.13.7-gke.8 us-east1-b\r\ngke-isim-dev-ha-default-pool-560160bf-7sl6 Ready 20h v1.13.7-gke.8 us-east1-c\r\ngke-isim-dev-ha-default-pool-560160bf-cnzh Ready 22h v1.13.7-gke.8 us-east1-c\r\ngke-isim-dev-ha-default-pool-f4da19f4-4n3c Ready 22h v1.13.7-gke.8 us-east1-d\r\ngke-isim-dev-ha-default-pool-f4da19f4-g79p Ready 22h v1.13.7-gke.8 us-east1-d\r\n\r\n✗ k -n linkerd get po -owide\r\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES\r\nlinkerd-controller-5b5765b845-7cnpz 3/3 Running 0 18m 10.60.1.27 gke-isim-dev-ha-default-pool-4b003e42-p1ps # us-east1-b\r\nlinkerd-controller-5b5765b845-j6ss9 3/3 Running 0 18m 10.60.3.20 gke-isim-dev-ha-default-pool-560160bf-cnzh # us-east1-c\r\nlinkerd-controller-5b5765b845-r92md 3/3 Running 0 18m 10.60.4.23 gke-isim-dev-ha-default-pool-f4da19f4-4n3c # us-east1-d\r\nlinkerd-grafana-7df55df848-888ck 2/2 Running 0 12m 10.60.1.30 gke-isim-dev-ha-default-pool-4b003e42-p1ps \r\nlinkerd-identity-74cf6f446f-7r57m 2/2 Running 0 18m 10.60.4.22 gke-isim-dev-ha-default-pool-f4da19f4-4n3c # us-east1-d\r\nlinkerd-identity-74cf6f446f-b9gfc 2/2 Running 0 18m 10.60.1.26 gke-isim-dev-ha-default-pool-4b003e42-p1ps #us-east1-b \r\nlinkerd-identity-74cf6f446f-fbw5w 2/2 Running 0 18m 10.60.2.20 gke-isim-dev-ha-default-pool-560160bf-7sl6 # us-east1-c\r\nlinkerd-prometheus-7bcc6c5b66-rv7n4 2/2 Running 3 18m 10.60.0.17 gke-isim-dev-ha-default-pool-4b003e42-1m9b \r\nlinkerd-proxy-injector-746bfbb494-8w8qk 2/2 Running 0 18m 10.60.3.21 gke-isim-dev-ha-default-pool-560160bf-cnzh # us-east1-c\r\nlinkerd-proxy-injector-746bfbb494-s42qm 2/2 Running 0 18m 10.60.4.24 gke-isim-dev-ha-default-pool-f4da19f4-4n3c # us-east1-d\r\nlinkerd-proxy-injector-746bfbb494-wlxxf 2/2 Running 0 18m 10.60.1.28 gke-isim-dev-ha-default-pool-4b003e42-p1ps # us-east1-b\r\nlinkerd-sp-validator-6947dff89c-4nqrs 2/2 Running 0 18m 10.60.0.18 gke-isim-dev-ha-default-pool-4b003e42-1m9b # 
us-east1-b\r\nlinkerd-sp-validator-6947dff89c-64vv9 2/2 Running 0 18m 10.60.2.23 gke-isim-dev-ha-default-pool-560160bf-7sl6 # us-east1-c\r\nlinkerd-sp-validator-6947dff89c-cdd4q 2/2 Running 0 18m 10.60.5.20 gke-isim-dev-ha-default-pool-f4da19f4-g79p # us-east1-d\r\nlinkerd-tap-5d7745b8c8-n9xwj 2/2 Running 0 18m 10.60.5.21 gke-isim-dev-ha-default-pool-f4da19f4-g79p # us-east1-d\r\nlinkerd-tap-5d7745b8c8-q9rtx 2/2 Running 0 18m 10.60.3.22 gke-isim-dev-ha-default-pool-560160bf-cnzh # us-east1-c\r\nlinkerd-tap-5d7745b8c8-rjznc 2/2 Running 0 18m 10.60.1.29 gke-isim-dev-ha-default-pool-4b003e42-p1ps # us-east1-b\r\nlinkerd-web-7cd4bf9d7-cd4bv 2/2 Running 0 18m 10.60.2.21 gke-isim-dev-ha-default-pool-560160bf-7sl6 ", + "✗ k -n linkerd scale deploy/linkerd-controller --replicas=10\r\ndeployment.extensions/linkerd-controller scaled\r\n✗ linkerd check\r\nlinkerd-existence\r\n-----------------\r\n√ 'linkerd-config' config map exists\r\n√ control plane replica sets are ready\r\n× no unschedulable pods\r\n linkerd-controller-5b5765b845-9nq7d: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-5b5765b845-ncztn: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-5b5765b845-tgdlp: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-5b5765b845-xshbc: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n see https://linkerd.io/checks/#l5d-existence-unschedulable-pods for hints" + ] + } + }, + "metadata": { + "tags": [ + "linkerd-viz", + "graduated", + "observability", + "troubleshoot" + ], + "cncfProjects": [ + "linkerd-viz" + ], + "targetResourceKinds": [ + "Deployment" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/linkerd/linkerd2/pull/2893", + "repo": "https://github.com/linkerd/linkerd2" + }, + "reactions": 2, + "comments": 13, + "synthesizedBy": "regex", + "qualityScore": 64 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with linkerd-viz installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:35:52.706Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/linkerd-viz/linkerd-viz-3470-the-linkerd-proxy-does-not-work-with-headless-services.json b/solutions/cncf-generated/linkerd-viz/linkerd-viz-3470-the-linkerd-proxy-does-not-work-with-headless-services.json new file mode 100644 index 00000000..3711300c --- /dev/null +++ b/solutions/cncf-generated/linkerd-viz/linkerd-viz-3470-the-linkerd-proxy-does-not-work-with-headless-services.json @@ -0,0 +1,82 @@ +{ + "version": "kc-mission-v1", + "name": "linkerd-viz-3470-the-linkerd-proxy-does-not-work-with-headless-services", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "linkerd-viz: The linkerd proxy does not work with headless services", + "description": "The linkerd proxy does not work with headless services (i.e. 
endpoints not referencing a pod).\n\nChanged endpoints_watcher to also return endpoints with no targetref. Changed endpoint_translator to handle addresses with no associated pod.\n\nRan tests in minikube verifying that the proxy handles headless services correctly both in cases with and without port-remapping.\n\nSigned-of-by: Johannes Hansen ", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "The linkerd proxy does not work with headless services (i.e. endpoints not referencing a pod).\n\nChanged endpoints_watcher to also return endpoints with no targetref. Changed endpoint_translator to han" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\npanic: runtime error: invalid memory address or nil pointer dereference\r\n[signal SIGSEGV: segmentation violation code=0x1 addr=0xb0 pc=0x13feefa]\r\n\r\ngoroutine 288 [running]:\r\ngithub.com/linkerd/linkerd2/controller/api/destination.(*endpointTranslator).toWeightedAddr(0xc000969630, 0xc000427300, 0xc, 0x50, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0004d1f40, ...)\r\n\t/linkerd-build/controller/api/destination/endpoint_translator.go:123 +0x3a\r\ngithub.com/linkerd/linkerd2/controller/api/destination.(*endpointTransla\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kubestellar/console-kb/pull/6. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "# CNCF Mission Generation Report (All Batches)\n\n**Date:** 2026-02-27T17:50:38Z\n**Batches:** 11\n\n---\n## Batch: generation-report-0.md\n# CNCF Mission Generation Report\n\n**Date:** 2026-02-27T17:47:59.472Z\n**Total generated:** 184\n**Skipped (duplicates):** 0\n**Errors:** 0\n\n## Projects Processed\n\n| Project | Maturity | Issues Found | Missions Generated | Errors |\n|---------|----------|-------------|-------------------|--------|\n| argo | graduated | 20 | 20 | 0 |\n| cert-manager | graduated | 17 | 17 | 0 |\n| cilium | graduated | 15 | 15 | 0 |\n| cloudevents | graduated | 3 | 3 | 0 |\n| containerd | graduated | 15 | 15 | 0 |\n| coredns | graduated | 6 | 6 | 0 |\n| cri-o | graduated | 14 | 14 | 0 |\n| crossplane | graduated | 8 | 8 | 0 |\n| cubefs | graduated | 0 | 0 | 0 |\n| dapr | graduated | 8 | 8 | 0 |\n| dragonfly | graduated | 0 | 0 | 0 |\n| envoy | graduated | 16 | 16 | 0 |\n| etcd | graduated | 12 | 12 | 0 |\n| falco | graduated | 9 | 9 | 0 |\n| fluentd | graduated | 2 | 2 | 0 |\n| flux | graduated | 0 | 0 | 0 |\n| harbor | graduated | 7 | 7 | 0 |\n| helm | graduated | 20 | 20 | 0 |\n| in-toto | graduated | 3 | 3 | 0 |\n| istio | graduated | 9 | 9 | 0 |\n\n## Generated Missions\n\n- **argo: feat: Multiple sources for applications** (advanced) — [source](https://github.com/argoproj/argo-cd/pull/10432)\n- **argo: feat: Extra Helm values from external git repo #5826** (advanced) — [source](https://github.com/argoproj/argo-cd/pull/6280)\n- **argo: feat(application-controller): Add support for rollback", + "codeSnippets": [ + "panic: runtime error: invalid memory address or nil pointer dereference\r\n[signal SIGSEGV: segmentation violation code=0x1 addr=0xb0 pc=0x13feefa]\r\n\r\ngoroutine 288 [running]:\r\ngithub.com/linkerd/linkerd2/controller/api/destination.(*endpointTranslator).toWeightedAddr(0xc000969630, 
0xc000427300, 0xc, 0x50, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0004d1f40, ...)\r\n\t/linkerd-build/controller/api/destination/endpoint_translator.go:123 +0x3a\r\ngithub.com/linkerd/linkerd2/controller/api/destination.(*endpointTransla", + "panic: runtime error: invalid memory address or nil pointer dereference\r\n[signal SIGSEGV: segmentation violation code=0x1 addr=0xb0 pc=0x13feefa]\r\n\r\ngoroutine 288 [running]:\r\ngithub.com/linkerd/linkerd2/controller/api/destination.(*endpointTranslator).toWeightedAddr(0xc000969630, 0xc000427300, 0xc, 0x50, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0004d1f40, ...)\r\n\t/linkerd-build/controller/api/destination/endpoint_translator.go:123 +0x3a\r\ngithub.com/linkerd/linkerd2/controller/api/destination.(*endpointTranslator).Add(0xc000969630, 0xc00049c5d0)\r\n\t/linkerd-build/controller/api/destination/endpoint_translator.go:50 +0x1c5\r\ngithub.com/linkerd/linkerd2/controller/api/destination/watcher.(*portPublisher).subscribe(0xc0004ae340, 0x1b1d100, 0xc000969630)\r\n\t/linkerd-build/controller/api/destination/watcher/endpoints_watcher.go:501 +0x68\r\ngithub.com/linkerd/linkerd2/controller/api/destination/watcher.(*servicePublisher).subscribe(0xc000422480, 0xc000000050, 0x0, 0x0, 0x1b1d100, 0xc000969630)\r\n\t/linkerd-build/controller/api/destination/watcher/endpoints_watcher.go:322 +0xda\r\ngithub.com/linkerd/linkerd2/controller/api/destination/watcher.(*EndpointsWatcher).Subscribe(0xc0004064e0, 0xc0005806d2, 0x5, 0xc0005806c0, 0x11, 0xc000000050, 0x0, 0x0, 0x1b1d100, 0xc000969630, ...)\r\n\t/linkerd-build/controller/api/destination/watcher/endpoints_watcher.go:158 +0x290\r\ngithub.com/linkerd/linkerd2/controller/api/destination.(*server).Get(0xc0003a8a20, 0xc0009694f0, 0x1b4a800, 0xc0005916d0, 0x0, 0x0)\r\n\t/linkerd-build/controller/api/destination/server.go:123 +0x70e\r\ngithub.com/linkerd/linkerd2-proxy-api/go/destination._Destination_Get_Handler(0x16aaf00, 0xc0003a8a20, 0x1b42280, 0xc0006a4ec0, 0xc0009694a0, 0x20)\r\n\t/go/pkg/mod/github.com/linkerd/linkerd2-proxy-api@v0.1.9/go/destination/destination.pb.go:1823 +0x109\r\ngithub.com/grpc-ecosystem/go-grpc-prometheus.StreamServerInterceptor(0x16aaf00, 0xc0003a8a20, 0x1b424c0, 0xc0002de3c0, 0xc0006a4d60, 0x192c7d0, 0x1b30440, 0xc000329c20)\r\n\t/go/pkg/mod/github.com/grpc-ecosystem/go-grpc-prometheus@v0.0.0-20160910222444-6b7015e65d36/server.go:40 +0xe3\r\ngoogle.golang.org/grpc.(*Server).processStreamingRPC(0xc0000ad980, 0x1b55a20, 0xc000766480, 0xc0004aa900, 0xc0004069c0, 0x294db00, 0x0, 0x0, 0x0)\r\n\t/go/pkg/mod/google.golang.org/grpc@v1.22.0/server.go:1209 +0x462\r\ngoogle.golang.org/grpc.(*Server).handleStream(0xc0000ad980, 0x1b55a20, 0xc000766480, 0xc0004aa900, 0x0)\r\n\t/go/pkg/mod/google.golang.org/grpc@v1.22.0/server.go:1282 +0xd3f\r\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.1(0xc000322710, 0xc0000ad980, 0x1b55a20, 0xc000766480, 0xc0004aa900)\r\n\t/go/pkg/mod/google.golang.org/grpc@v1.22.0/server.go:717 +0x9f\r\ncreated by google.golang.org/grpc.(*Server).serveStreams.func1\r\n\t/go/pkg/mod/google.golang.org/grpc@v1.22.0/server.go:715 +0xa1", + "YAML I used for testing:", + "Using a `curl` container:" + ] + } + }, + "metadata": { + "tags": [ + "linkerd-viz", + "graduated", + "observability", + "troubleshoot" + ], + "cncfProjects": [ + "linkerd-viz" + ], + "targetResourceKinds": [ + "Pod", + "Service" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/linkerd/linkerd2/pull/3470", + "repo": 
"https://github.com/linkerd/linkerd2", + "pr": "https://github.com/kubestellar/console-kb/pull/6" + }, + "reactions": 2, + "comments": 12, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with linkerd-viz installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:35:54.281Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/linkerd-viz/linkerd-viz-5412-ignore-pods-with-status-phase-succeeded-when-watching-ip-addres.json b/solutions/cncf-generated/linkerd-viz/linkerd-viz-5412-ignore-pods-with-status-phase-succeeded-when-watching-ip-addres.json new file mode 100644 index 00000000..bf06bff2 --- /dev/null +++ b/solutions/cncf-generated/linkerd-viz/linkerd-viz-5412-ignore-pods-with-status-phase-succeeded-when-watching-ip-addres.json @@ -0,0 +1,80 @@ +{ + "version": "kc-mission-v1", + "name": "linkerd-viz-5412-ignore-pods-with-status-phase-succeeded-when-watching-ip-addres", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "linkerd-viz: Ignore pods with status.phase=Succeeded when watching IP addresses", + "description": "Ignore pods with status.phase=Succeeded when watching IP addresses\n\nWhen a pod terminates successfully, some CNIs will assign its IP address\nto newly created pods. This can lead to duplicate pod IPs in the same\nKubernetes cluster.\n\nFilter out pods which are in a Succeeded phase since they are not \nroutable anymore.", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Ignore pods with status.phase=Succeeded when watching IP addresses\n\nWhen a pod terminates successfully, some CNIs will assign its IP address\nto newly created pods. This can lead to duplicate pod IPs i" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n❯ env GITHUB_TOKEN='..' bin/install-pr --k3d 5412\r\n..\r\nINFO[0004] DONE \r\n/home/kevin/Projects/linkerd/linkerd2\r\n\r\nLinkerd CLI available:\r\n/home/kevin/Projects/linkerd/linkerd2/target/release/linkerd2-cli-git-65f0d802-linux-amd64\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kubestellar/console-kb/pull/6. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "# CNCF Mission Generation Report (All Batches)\n\n**Date:** 2026-02-27T17:50:38Z\n**Batches:** 11\n\n---\n## Batch: generation-report-0.md\n# CNCF Mission Generation Report\n\n**Date:** 2026-02-27T17:47:59.472Z\n**Total generated:** 184\n**Skipped (duplicates):** 0\n**Errors:** 0\n\n## Projects Processed\n\n| Project | Maturity | Issues Found | Missions Generated | Errors |\n|---------|----------|-------------|-------------------|--------|\n| argo | graduated | 20 | 20 | 0 |\n| cert-manager | graduated | 17 | 17 | 0 |\n| cilium | graduated | 15 | 15 | 0 |\n| cloudevents | graduated | 3 | 3 | 0 |\n| containerd | graduated | 15 | 15 | 0 |\n| coredns | graduated | 6 | 6 | 0 |\n| cri-o | graduated | 14 | 14 | 0 |\n| crossplane | graduated | 8 | 8 | 0 |\n| cubefs | graduated | 0 | 0 | 0 |\n| dapr | graduated | 8 | 8 | 0 |\n| dragonfly | graduated | 0 | 0 | 0 |\n| envoy | graduated | 16 | 16 | 0 |\n| etcd | graduated | 12 | 12 | 0 |\n| falco | graduated | 9 | 9 | 0 |\n| fluentd | graduated | 2 | 2 | 0 |\n| flux | graduated | 0 | 0 | 0 |\n| harbor | graduated | 7 | 7 | 0 |\n| helm | graduated | 20 | 20 | 0 |\n| in-toto | graduated | 3 | 3 | 0 |\n| istio | graduated | 9 | 9 | 0 |\n\n## Generated Missions\n\n- **argo: feat: Multiple sources for applications** (advanced) — [source](https://github.com/argoproj/argo-cd/pull/10432)\n- **argo: feat: Extra Helm values from external git repo #5826** (advanced) — [source](https://github.com/argoproj/argo-cd/pull/6280)\n- **argo: feat(application-controller): Add support for rollback", + "codeSnippets": [ + "❯ env GITHUB_TOKEN='..' bin/install-pr --k3d 5412\r\n..\r\nINFO[0004] DONE \r\n/home/kevin/Projects/linkerd/linkerd2\r\n\r\nLinkerd CLI available:\r\n/home/kevin/Projects/linkerd/linkerd2/target/release/linkerd2-cli-git-65f0d802-linux-amd64", + "❯ env GITHUB_TOKEN='..' bin/install-pr --k3d 5412\r\n..\r\nINFO[0004] DONE \r\n/home/kevin/Projects/linkerd/linkerd2\r\n\r\nLinkerd CLI available:\r\n/home/kevin/Projects/linkerd/linkerd2/target/release/linkerd2-cli-git-65f0d802-linux-amd64", + "./target/release/linkerd2-cli-git-65f0d802-linux-amd64 install |kubectl apply -f -" + ] + } + }, + "metadata": { + "tags": [ + "linkerd-viz", + "graduated", + "observability", + "troubleshoot" + ], + "cncfProjects": [ + "linkerd-viz" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/linkerd/linkerd2/pull/5412", + "repo": "https://github.com/linkerd/linkerd2", + "pr": "https://github.com/kubestellar/console-kb/pull/6" + }, + "reactions": 6, + "comments": 14, + "synthesizedBy": "regex", + "qualityScore": 64 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with linkerd-viz installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:35:39.509Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/linkerd-viz/linkerd-viz-9113-add-podmonitor-resources-to-the-helm-chart.json b/solutions/cncf-generated/linkerd-viz/linkerd-viz-9113-add-podmonitor-resources-to-the-helm-chart.json new file mode 100644 index 00000000..0a155549 --- /dev/null +++ b/solutions/cncf-generated/linkerd-viz/linkerd-viz-9113-add-podmonitor-resources-to-the-helm-chart.json @@ -0,0 +1,77 @@ +{ + "version": "kc-mission-v1", + "name": "linkerd-viz-9113-add-podmonitor-resources-to-the-helm-chart", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "linkerd-viz: Add PodMonitor resources to the Helm chart", + "description": "Add PodMonitor resources to the Helm chart\n\nWith an external Prometheus setup installed using prometheus-operator the Prometheus instance scraping can be configured using Service/PodMonitor resources.\n\nBy adding PodMonitor resource into Linkerd Helm chart we can mimic the configuration of bundled Prometheus, see https://github.com/linkerd/linkerd2/blob/main/viz/charts/linkerd-viz/templates/prometheus.yaml#L47-L151, that comes with linkerd-viz extension. The PodMonitor resources are based on http", + "type": "deploy", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Add PodMonitor resources to the Helm chart\n\nWith an external Prometheus setup installed using prometheus-operator the Prometheus instance scraping can be configured using Service/PodMonitor resources." + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nSubject\r\n\r\nProblem\r\n\r\nSolution\r\n\r\nValidation\r\n\r\nFixes #[GitHub issue ID]\r\n\r\nDCO Sign off\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Also, if you check the diff after running `go test ./... -update` you'll see `podmonitor.yaml` is introducing an empty line in the output. Can you please check that out? 
It's gotta be something related to the `{{-` `-}}` space-suppressing tags.", + "codeSnippets": [ + "Subject\r\n\r\nProblem\r\n\r\nSolution\r\n\r\nValidation\r\n\r\nFixes #[GitHub issue ID]\r\n\r\nDCO Sign off", + "Subject\r\n\r\nProblem\r\n\r\nSolution\r\n\r\nValidation\r\n\r\nFixes #[GitHub issue ID]\r\n\r\nDCO Sign off", + "Introduce Pull Request Template\r\n\r\nGitHub's community guidelines recommend a pull request template, the repo was\r\nlacking one.\r\n\r\nIntroduce a `PULL_REQUEST_TEMPLATE.md` file.\r\n\r\nOnce merged, the\r\n[Community profile checklist](https://github.com/linkerd/linkerd2/community)\r\nshould indicate the repo now provides a pull request template.\r\n\r\nFixes #3321\r\n\r\nSigned-off-by: Jane Smith " + ] + } + }, + "metadata": { + "tags": [ + "linkerd-viz", + "graduated", + "observability", + "deploy" + ], + "cncfProjects": [ + "linkerd-viz" + ], + "targetResourceKinds": [ + "Pod", + "Service", + "Job" + ], + "difficulty": "beginner", + "issueTypes": [ + "deploy" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/linkerd/linkerd2/pull/9113", + "repo": "https://github.com/linkerd/linkerd2" + }, + "reactions": 4, + "comments": 3, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with linkerd-viz installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:35:45.286Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/linkerd/linkerd-13615-expose-issuer-certificate-ttl-as-a-prometheus-metric.json b/solutions/cncf-generated/linkerd/linkerd-13615-expose-issuer-certificate-ttl-as-a-prometheus-metric.json new file mode 100644 index 00000000..106d6932 --- /dev/null +++ b/solutions/cncf-generated/linkerd/linkerd-13615-expose-issuer-certificate-ttl-as-a-prometheus-metric.json @@ -0,0 +1,81 @@ +{ + "version": "kc-mission-v1", + "name": "linkerd-13615-expose-issuer-certificate-ttl-as-a-prometheus-metric", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "linkerd: expose issuer certificate TTL as a prometheus metric", + "description": "Problem: There is currently no simple way to monitor the expiration time of the issuer certificate in use by linkerd; a surprising omission considering that issuer cert expiration will almost certainly cause visible cluster issues.\n\nSolution: \n\n- When a new issuer certificate is loaded, log its NotAfter time in unix epoch format, along with the current process wall clock time. The two timestamps are passed in via the logrus Fields pattern, allowing operators to easily pull these numbers from pod", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Problem: There is currently no simple way to monitor the expiration time of the issuer certificate in use by linkerd; a surprising omission considering that issuer cert expiration will almost certainl" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nSubject\r\n\r\nProblem\r\n\r\nSolution\r\n\r\nValidation\r\n\r\nFixes #[GitHub issue ID]\r\n\r\nDCO Sign off\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kubestellar/console-kb/pull/6. 
Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "# CNCF Mission Generation Report (All Batches)\n\n**Date:** 2026-02-27T17:50:38Z\n**Batches:** 11\n\n---\n## Batch: generation-report-0.md\n# CNCF Mission Generation Report\n\n**Date:** 2026-02-27T17:47:59.472Z\n**Total generated:** 184\n**Skipped (duplicates):** 0\n**Errors:** 0\n\n## Projects Processed\n\n| Project | Maturity | Issues Found | Missions Generated | Errors |\n|---------|----------|-------------|-------------------|--------|\n| argo | graduated | 20 | 20 | 0 |\n| cert-manager | graduated | 17 | 17 | 0 |\n| cilium | graduated | 15 | 15 | 0 |\n| cloudevents | graduated | 3 | 3 | 0 |\n| containerd | graduated | 15 | 15 | 0 |\n| coredns | graduated | 6 | 6 | 0 |\n| cri-o | graduated | 14 | 14 | 0 |\n| crossplane | graduated | 8 | 8 | 0 |\n| cubefs | graduated | 0 | 0 | 0 |\n| dapr | graduated | 8 | 8 | 0 |\n| dragonfly | graduated | 0 | 0 | 0 |\n| envoy | graduated | 16 | 16 | 0 |\n| etcd | graduated | 12 | 12 | 0 |\n| falco | graduated | 9 | 9 | 0 |\n| fluentd | graduated | 2 | 2 | 0 |\n| flux | graduated | 0 | 0 | 0 |\n| harbor | graduated | 7 | 7 | 0 |\n| helm | graduated | 20 | 20 | 0 |\n| in-toto | graduated | 3 | 3 | 0 |\n| istio | graduated | 9 | 9 | 0 |\n\n## Generated Missions\n\n- **argo: feat: Multiple sources for applications** (advanced) — [source](https://github.com/argoproj/argo-cd/pull/10432)\n- **argo: feat: Extra Helm values from external git repo #5826** (advanced) — [source](https://github.com/argoproj/argo-cd/pull/6280)\n- **argo: feat(application-controller): Add support for rollback", + "codeSnippets": [ + "Subject\r\n\r\nProblem\r\n\r\nSolution\r\n\r\nValidation\r\n\r\nFixes #[GitHub issue ID]\r\n\r\nDCO Sign off", + "Subject\r\n\r\nProblem\r\n\r\nSolution\r\n\r\nValidation\r\n\r\nFixes #[GitHub issue ID]\r\n\r\nDCO Sign off", + "Introduce Pull Request Template\r\n\r\nGitHub's community guidelines recommend a pull request template, the repo was\r\nlacking one.\r\n\r\nIntroduce a `PULL_REQUEST_TEMPLATE.md` file.\r\n\r\nOnce merged, the\r\n[Community profile checklist](https://github.com/linkerd/linkerd2/community)\r\nshould indicate the repo now provides a pull request template.\r\n\r\nFixes #3321\r\n\r\nSigned-off-by: Jane Smith ", + "=== Skipped\r\n=== SKIP: viz/cmd TestRequestTapByResourceFromAPI/Should_return_error_if_stream_returned_error (0.00s)\r\n --- SKIP: TestRequestTapByResourceFromAPI/Should_return_error_if_stream_returned_error (0.00s)\r\n\r\nDONE 1063 tests, 1 skipped in 94.401s" + ] + } + }, + "metadata": { + "tags": [ + "linkerd", + "graduated", + "networking", + "troubleshoot" + ], + "cncfProjects": [ + "linkerd" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/linkerd/linkerd2/pull/13615", + "repo": "https://github.com/linkerd/linkerd2", + "pr": "https://github.com/kubestellar/console-kb/pull/6" + }, + "reactions": 5, + "comments": 11, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with linkerd installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:35:20.368Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/linkerd/linkerd-2893-added-anti-affinity-when-ha-is-configured.json b/solutions/cncf-generated/linkerd/linkerd-2893-added-anti-affinity-when-ha-is-configured.json new file mode 100644 index 00000000..2653962d --- /dev/null +++ b/solutions/cncf-generated/linkerd/linkerd-2893-added-anti-affinity-when-ha-is-configured.json @@ -0,0 +1,76 @@ +{ + "version": "kc-mission-v1", + "name": "linkerd-2893-added-anti-affinity-when-ha-is-configured", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "linkerd: Added Anti Affinity when HA is configured", + "description": "The following PR adds anti-affinity rules to `proxy-injector`, `sp-validator`, `linkerd-controller`, `tap` deployments.\n\nThe idea was to make anti-affinity rules both based on `kubernetes.io/hostname` and `failure-domain.beta.kubernetes.io/zone` **preferred** when only the the `--ha` flag is configured.\n\nif the `--required-host-anti-affinity` is also configured along with `--ha`, then the `kubernetes.io/hostname` is **required** while `failure-domain.beta.kubernetes.io/zone` is still **preferred", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "The following PR adds anti-affinity rules to `proxy-injector`, `sp-validator`, `linkerd-controller`, `tap` deployments.\n\nThe idea was to make anti-affinity rules both based on `kubernetes.io/hostname`" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nlinkerd-existence\r\n-----------------\r\n√ 'linkerd-config' config map exists\r\n√ control plane replica sets are ready\r\n× no unschedulable pods\r\n linkerd-controller-7d9bdd85b8-2sk7q: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-74vsr: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "> Defaulting to preferred feels like we’re introducing two types of HA, “kinda” HA and “real” HA.\n\n:+1: Agreed.\n\n@grampelberg @Pothulapati So to summarize,\n\n* Remove the `--required-host-anti-affinity` option\n* By default, HA mode uses `required` hostname anti-affinity and `preferred` zone anti-affinity (there will almost always be more replicas than zones, so `required` zone anti-affinity won't work.)\n\nIf the `required` hostname anti-affinity is causing an installation failure, `linkerd check` actually detects the problem and outputs the reason:\n\n```\nlinkerd-existence\n-----------------\n√ 'linkerd-config' config map exists\n√ control plane replica sets are ready\n× no unschedulable pods\n linkerd-controller-7d9bdd85b8-2sk7q: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\n linkerd-controller-7d9bdd85b8-74vsr: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\n linkerd-controller-7d9bdd85b8-97lds: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\n linkerd-controller-7d9bdd85b8-9lj9v: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\n linkerd-contro", + "codeSnippets": [ + "linkerd-existence\r\n-----------------\r\n√ 'linkerd-config' config map exists\r\n√ control plane replica sets are ready\r\n× no unschedulable pods\r\n linkerd-controller-7d9bdd85b8-2sk7q: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-74vsr: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy", + "linkerd-existence\r\n-----------------\r\n√ 'linkerd-config' config map exists\r\n√ control plane replica sets are ready\r\n× no unschedulable pods\r\n linkerd-controller-7d9bdd85b8-2sk7q: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-74vsr: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-97lds: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-9lj9v: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-9sx8d: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-kbzwn: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-lcgww: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 
node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-7d9bdd85b8-zmsn4: 0/6 nodes are available: 1 Insufficient cpu, 5 node(s) didn't match pod affinity/anti-affinity, 5 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-proxy-injector-76c4f5c7d9-98gm8: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-proxy-injector-76c4f5c7d9-w7jwc: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-sp-validator-6bc6cc666b-4k5zs: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-sp-validator-6bc6cc666b-qvkdb: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-tap-8688fdf4f-c5tgb: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-tap-8688fdf4f-wmlnz: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n see https://linkerd.io/checks/#l5d-existence-unschedulable-pods for hints", + "✗ k get no -L failure-domain.beta.kubernetes.io/zone\r\nNAME STATUS ROLES AGE VERSION ZONE\r\ngke-isim-dev-ha-default-pool-4b003e42-1m9b Ready 22h v1.13.7-gke.8 us-east1-b\r\ngke-isim-dev-ha-default-pool-4b003e42-p1ps Ready 22h v1.13.7-gke.8 us-east1-b\r\ngke-isim-dev-ha-default-pool-560160bf-7sl6 Ready 20h v1.13.7-gke.8 us-east1-c\r\ngke-isim-dev-ha-default-pool-560160bf-cnzh Ready 22h v1.13.7-gke.8 us-east1-c\r\ngke-isim-dev-ha-default-pool-f4da19f4-4n3c Ready 22h v1.13.7-gke.8 us-east1-d\r\ngke-isim-dev-ha-default-pool-f4da19f4-g79p Ready 22h v1.13.7-gke.8 us-east1-d\r\n\r\n✗ k -n linkerd get po -owide\r\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES\r\nlinkerd-controller-5b5765b845-7cnpz 3/3 Running 0 18m 10.60.1.27 gke-isim-dev-ha-default-pool-4b003e42-p1ps # us-east1-b\r\nlinkerd-controller-5b5765b845-j6ss9 3/3 Running 0 18m 10.60.3.20 gke-isim-dev-ha-default-pool-560160bf-cnzh # us-east1-c\r\nlinkerd-controller-5b5765b845-r92md 3/3 Running 0 18m 10.60.4.23 gke-isim-dev-ha-default-pool-f4da19f4-4n3c # us-east1-d\r\nlinkerd-grafana-7df55df848-888ck 2/2 Running 0 12m 10.60.1.30 gke-isim-dev-ha-default-pool-4b003e42-p1ps \r\nlinkerd-identity-74cf6f446f-7r57m 2/2 Running 0 18m 10.60.4.22 gke-isim-dev-ha-default-pool-f4da19f4-4n3c # us-east1-d\r\nlinkerd-identity-74cf6f446f-b9gfc 2/2 Running 0 18m 10.60.1.26 gke-isim-dev-ha-default-pool-4b003e42-p1ps #us-east1-b \r\nlinkerd-identity-74cf6f446f-fbw5w 2/2 Running 0 18m 10.60.2.20 gke-isim-dev-ha-default-pool-560160bf-7sl6 # us-east1-c\r\nlinkerd-prometheus-7bcc6c5b66-rv7n4 2/2 Running 3 18m 10.60.0.17 gke-isim-dev-ha-default-pool-4b003e42-1m9b \r\nlinkerd-proxy-injector-746bfbb494-8w8qk 2/2 Running 0 18m 10.60.3.21 gke-isim-dev-ha-default-pool-560160bf-cnzh # us-east1-c\r\nlinkerd-proxy-injector-746bfbb494-s42qm 2/2 Running 0 18m 10.60.4.24 gke-isim-dev-ha-default-pool-f4da19f4-4n3c # us-east1-d\r\nlinkerd-proxy-injector-746bfbb494-wlxxf 2/2 Running 0 18m 10.60.1.28 gke-isim-dev-ha-default-pool-4b003e42-p1ps # us-east1-b\r\nlinkerd-sp-validator-6947dff89c-4nqrs 2/2 Running 0 18m 10.60.0.18 gke-isim-dev-ha-default-pool-4b003e42-1m9b # 
us-east1-b\r\nlinkerd-sp-validator-6947dff89c-64vv9 2/2 Running 0 18m 10.60.2.23 gke-isim-dev-ha-default-pool-560160bf-7sl6 # us-east1-c\r\nlinkerd-sp-validator-6947dff89c-cdd4q 2/2 Running 0 18m 10.60.5.20 gke-isim-dev-ha-default-pool-f4da19f4-g79p # us-east1-d\r\nlinkerd-tap-5d7745b8c8-n9xwj 2/2 Running 0 18m 10.60.5.21 gke-isim-dev-ha-default-pool-f4da19f4-g79p # us-east1-d\r\nlinkerd-tap-5d7745b8c8-q9rtx 2/2 Running 0 18m 10.60.3.22 gke-isim-dev-ha-default-pool-560160bf-cnzh # us-east1-c\r\nlinkerd-tap-5d7745b8c8-rjznc 2/2 Running 0 18m 10.60.1.29 gke-isim-dev-ha-default-pool-4b003e42-p1ps # us-east1-b\r\nlinkerd-web-7cd4bf9d7-cd4bv 2/2 Running 0 18m 10.60.2.21 gke-isim-dev-ha-default-pool-560160bf-7sl6 ", + "✗ k -n linkerd scale deploy/linkerd-controller --replicas=10\r\ndeployment.extensions/linkerd-controller scaled\r\n✗ linkerd check\r\nlinkerd-existence\r\n-----------------\r\n√ 'linkerd-config' config map exists\r\n√ control plane replica sets are ready\r\n× no unschedulable pods\r\n linkerd-controller-5b5765b845-9nq7d: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-5b5765b845-ncztn: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-5b5765b845-tgdlp: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n linkerd-controller-5b5765b845-xshbc: 0/6 nodes are available: 6 node(s) didn't match pod affinity/anti-affinity, 6 node(s) didn't satisfy existing pods anti-affinity rules.\r\n see https://linkerd.io/checks/#l5d-existence-unschedulable-pods for hints" + ] + } + }, + "metadata": { + "tags": [ + "linkerd", + "graduated", + "networking", + "troubleshoot" + ], + "cncfProjects": [ + "linkerd" + ], + "targetResourceKinds": [ + "Deployment" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/linkerd/linkerd2/pull/2893", + "repo": "https://github.com/linkerd/linkerd2" + }, + "reactions": 2, + "comments": 13, + "synthesizedBy": "regex", + "qualityScore": 64 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with linkerd installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:35:31.305Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/linkerd/linkerd-3470-the-linkerd-proxy-does-not-work-with-headless-services.json b/solutions/cncf-generated/linkerd/linkerd-3470-the-linkerd-proxy-does-not-work-with-headless-services.json new file mode 100644 index 00000000..6f583d72 --- /dev/null +++ b/solutions/cncf-generated/linkerd/linkerd-3470-the-linkerd-proxy-does-not-work-with-headless-services.json @@ -0,0 +1,82 @@ +{ + "version": "kc-mission-v1", + "name": "linkerd-3470-the-linkerd-proxy-does-not-work-with-headless-services", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "linkerd: The linkerd proxy does not work with headless services", + "description": "The linkerd proxy does not work with headless services (i.e. endpoints not referencing a pod).\n\nChanged endpoints_watcher to also return endpoints with no targetref. 
Changed endpoint_translator to handle addresses with no associated pod.\n\nRan tests in minikube verifying that the proxy handles headless services correctly both in cases with and without port-remapping.\n\nSigned-of-by: Johannes Hansen ", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "The linkerd proxy does not work with headless services (i.e. endpoints not referencing a pod).\n\nChanged endpoints_watcher to also return endpoints with no targetref. Changed endpoint_translator to han" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\npanic: runtime error: invalid memory address or nil pointer dereference\r\n[signal SIGSEGV: segmentation violation code=0x1 addr=0xb0 pc=0x13feefa]\r\n\r\ngoroutine 288 [running]:\r\ngithub.com/linkerd/linkerd2/controller/api/destination.(*endpointTranslator).toWeightedAddr(0xc000969630, 0xc000427300, 0xc, 0x50, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0004d1f40, ...)\r\n\t/linkerd-build/controller/api/destination/endpoint_translator.go:123 +0x3a\r\ngithub.com/linkerd/linkerd2/controller/api/destination.(*endpointTransla\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kubestellar/console-kb/pull/6. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "# CNCF Mission Generation Report (All Batches)\n\n**Date:** 2026-02-27T17:50:38Z\n**Batches:** 11\n\n---\n## Batch: generation-report-0.md\n# CNCF Mission Generation Report\n\n**Date:** 2026-02-27T17:47:59.472Z\n**Total generated:** 184\n**Skipped (duplicates):** 0\n**Errors:** 0\n\n## Projects Processed\n\n| Project | Maturity | Issues Found | Missions Generated | Errors |\n|---------|----------|-------------|-------------------|--------|\n| argo | graduated | 20 | 20 | 0 |\n| cert-manager | graduated | 17 | 17 | 0 |\n| cilium | graduated | 15 | 15 | 0 |\n| cloudevents | graduated | 3 | 3 | 0 |\n| containerd | graduated | 15 | 15 | 0 |\n| coredns | graduated | 6 | 6 | 0 |\n| cri-o | graduated | 14 | 14 | 0 |\n| crossplane | graduated | 8 | 8 | 0 |\n| cubefs | graduated | 0 | 0 | 0 |\n| dapr | graduated | 8 | 8 | 0 |\n| dragonfly | graduated | 0 | 0 | 0 |\n| envoy | graduated | 16 | 16 | 0 |\n| etcd | graduated | 12 | 12 | 0 |\n| falco | graduated | 9 | 9 | 0 |\n| fluentd | graduated | 2 | 2 | 0 |\n| flux | graduated | 0 | 0 | 0 |\n| harbor | graduated | 7 | 7 | 0 |\n| helm | graduated | 20 | 20 | 0 |\n| in-toto | graduated | 3 | 3 | 0 |\n| istio | graduated | 9 | 9 | 0 |\n\n## Generated Missions\n\n- **argo: feat: Multiple sources for applications** (advanced) — [source](https://github.com/argoproj/argo-cd/pull/10432)\n- **argo: feat: Extra Helm values from external git repo #5826** (advanced) — [source](https://github.com/argoproj/argo-cd/pull/6280)\n- **argo: feat(application-controller): Add support for rollback", + "codeSnippets": [ + "panic: runtime error: invalid memory address or nil pointer dereference\r\n[signal SIGSEGV: segmentation violation code=0x1 addr=0xb0 pc=0x13feefa]\r\n\r\ngoroutine 288 [running]:\r\ngithub.com/linkerd/linkerd2/controller/api/destination.(*endpointTranslator).toWeightedAddr(0xc000969630, 0xc000427300, 0xc, 0x50, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0004d1f40, 
...)\r\n\t/linkerd-build/controller/api/destination/endpoint_translator.go:123 +0x3a\r\ngithub.com/linkerd/linkerd2/controller/api/destination.(*endpointTransla", + "panic: runtime error: invalid memory address or nil pointer dereference\r\n[signal SIGSEGV: segmentation violation code=0x1 addr=0xb0 pc=0x13feefa]\r\n\r\ngoroutine 288 [running]:\r\ngithub.com/linkerd/linkerd2/controller/api/destination.(*endpointTranslator).toWeightedAddr(0xc000969630, 0xc000427300, 0xc, 0x50, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0004d1f40, ...)\r\n\t/linkerd-build/controller/api/destination/endpoint_translator.go:123 +0x3a\r\ngithub.com/linkerd/linkerd2/controller/api/destination.(*endpointTranslator).Add(0xc000969630, 0xc00049c5d0)\r\n\t/linkerd-build/controller/api/destination/endpoint_translator.go:50 +0x1c5\r\ngithub.com/linkerd/linkerd2/controller/api/destination/watcher.(*portPublisher).subscribe(0xc0004ae340, 0x1b1d100, 0xc000969630)\r\n\t/linkerd-build/controller/api/destination/watcher/endpoints_watcher.go:501 +0x68\r\ngithub.com/linkerd/linkerd2/controller/api/destination/watcher.(*servicePublisher).subscribe(0xc000422480, 0xc000000050, 0x0, 0x0, 0x1b1d100, 0xc000969630)\r\n\t/linkerd-build/controller/api/destination/watcher/endpoints_watcher.go:322 +0xda\r\ngithub.com/linkerd/linkerd2/controller/api/destination/watcher.(*EndpointsWatcher).Subscribe(0xc0004064e0, 0xc0005806d2, 0x5, 0xc0005806c0, 0x11, 0xc000000050, 0x0, 0x0, 0x1b1d100, 0xc000969630, ...)\r\n\t/linkerd-build/controller/api/destination/watcher/endpoints_watcher.go:158 +0x290\r\ngithub.com/linkerd/linkerd2/controller/api/destination.(*server).Get(0xc0003a8a20, 0xc0009694f0, 0x1b4a800, 0xc0005916d0, 0x0, 0x0)\r\n\t/linkerd-build/controller/api/destination/server.go:123 +0x70e\r\ngithub.com/linkerd/linkerd2-proxy-api/go/destination._Destination_Get_Handler(0x16aaf00, 0xc0003a8a20, 0x1b42280, 0xc0006a4ec0, 0xc0009694a0, 0x20)\r\n\t/go/pkg/mod/github.com/linkerd/linkerd2-proxy-api@v0.1.9/go/destination/destination.pb.go:1823 +0x109\r\ngithub.com/grpc-ecosystem/go-grpc-prometheus.StreamServerInterceptor(0x16aaf00, 0xc0003a8a20, 0x1b424c0, 0xc0002de3c0, 0xc0006a4d60, 0x192c7d0, 0x1b30440, 0xc000329c20)\r\n\t/go/pkg/mod/github.com/grpc-ecosystem/go-grpc-prometheus@v0.0.0-20160910222444-6b7015e65d36/server.go:40 +0xe3\r\ngoogle.golang.org/grpc.(*Server).processStreamingRPC(0xc0000ad980, 0x1b55a20, 0xc000766480, 0xc0004aa900, 0xc0004069c0, 0x294db00, 0x0, 0x0, 0x0)\r\n\t/go/pkg/mod/google.golang.org/grpc@v1.22.0/server.go:1209 +0x462\r\ngoogle.golang.org/grpc.(*Server).handleStream(0xc0000ad980, 0x1b55a20, 0xc000766480, 0xc0004aa900, 0x0)\r\n\t/go/pkg/mod/google.golang.org/grpc@v1.22.0/server.go:1282 +0xd3f\r\ngoogle.golang.org/grpc.(*Server).serveStreams.func1.1(0xc000322710, 0xc0000ad980, 0x1b55a20, 0xc000766480, 0xc0004aa900)\r\n\t/go/pkg/mod/google.golang.org/grpc@v1.22.0/server.go:717 +0x9f\r\ncreated by google.golang.org/grpc.(*Server).serveStreams.func1\r\n\t/go/pkg/mod/google.golang.org/grpc@v1.22.0/server.go:715 +0xa1", + "YAML I used for testing:", + "Using a `curl` container:" + ] + } + }, + "metadata": { + "tags": [ + "linkerd", + "graduated", + "networking", + "troubleshoot" + ], + "cncfProjects": [ + "linkerd" + ], + "targetResourceKinds": [ + "Pod", + "Service" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/linkerd/linkerd2/pull/3470", + "repo": "https://github.com/linkerd/linkerd2", + "pr": 
"https://github.com/kubestellar/console-kb/pull/6" + }, + "reactions": 2, + "comments": 12, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with linkerd installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:35:32.780Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/linkerd/linkerd-5412-ignore-pods-with-status-phase-succeeded-when-watching-ip-addresses.json b/solutions/cncf-generated/linkerd/linkerd-5412-ignore-pods-with-status-phase-succeeded-when-watching-ip-addresses.json new file mode 100644 index 00000000..091e36ae --- /dev/null +++ b/solutions/cncf-generated/linkerd/linkerd-5412-ignore-pods-with-status-phase-succeeded-when-watching-ip-addresses.json @@ -0,0 +1,80 @@ +{ + "version": "kc-mission-v1", + "name": "linkerd-5412-ignore-pods-with-status-phase-succeeded-when-watching-ip-addresses", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "linkerd: Ignore pods with status.phase=Succeeded when watching IP addresses", + "description": "Ignore pods with status.phase=Succeeded when watching IP addresses\n\nWhen a pod terminates successfully, some CNIs will assign its IP address\nto newly created pods. This can lead to duplicate pod IPs in the same\nKubernetes cluster.\n\nFilter out pods which are in a Succeeded phase since they are not \nroutable anymore.", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Ignore pods with status.phase=Succeeded when watching IP addresses\n\nWhen a pod terminates successfully, some CNIs will assign its IP address\nto newly created pods. This can lead to duplicate pod IPs i" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n❯ env GITHUB_TOKEN='..' bin/install-pr --k3d 5412\r\n..\r\nINFO[0004] DONE \r\n/home/kevin/Projects/linkerd/linkerd2\r\n\r\nLinkerd CLI available:\r\n/home/kevin/Projects/linkerd/linkerd2/target/release/linkerd2-cli-git-65f0d802-linux-amd64\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kubestellar/console-kb/pull/6. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "# CNCF Mission Generation Report (All Batches)\n\n**Date:** 2026-02-27T17:50:38Z\n**Batches:** 11\n\n---\n## Batch: generation-report-0.md\n# CNCF Mission Generation Report\n\n**Date:** 2026-02-27T17:47:59.472Z\n**Total generated:** 184\n**Skipped (duplicates):** 0\n**Errors:** 0\n\n## Projects Processed\n\n| Project | Maturity | Issues Found | Missions Generated | Errors |\n|---------|----------|-------------|-------------------|--------|\n| argo | graduated | 20 | 20 | 0 |\n| cert-manager | graduated | 17 | 17 | 0 |\n| cilium | graduated | 15 | 15 | 0 |\n| cloudevents | graduated | 3 | 3 | 0 |\n| containerd | graduated | 15 | 15 | 0 |\n| coredns | graduated | 6 | 6 | 0 |\n| cri-o | graduated | 14 | 14 | 0 |\n| crossplane | graduated | 8 | 8 | 0 |\n| cubefs | graduated | 0 | 0 | 0 |\n| dapr | graduated | 8 | 8 | 0 |\n| dragonfly | graduated | 0 | 0 | 0 |\n| envoy | graduated | 16 | 16 | 0 |\n| etcd | graduated | 12 | 12 | 0 |\n| falco | graduated | 9 | 9 | 0 |\n| fluentd | graduated | 2 | 2 | 0 |\n| flux | graduated | 0 | 0 | 0 |\n| harbor | graduated | 7 | 7 | 0 |\n| helm | graduated | 20 | 20 | 0 |\n| in-toto | graduated | 3 | 3 | 0 |\n| istio | graduated | 9 | 9 | 0 |\n\n## Generated Missions\n\n- **argo: feat: Multiple sources for applications** (advanced) — [source](https://github.com/argoproj/argo-cd/pull/10432)\n- **argo: feat: Extra Helm values from external git repo #5826** (advanced) — [source](https://github.com/argoproj/argo-cd/pull/6280)\n- **argo: feat(application-controller): Add support for rollback", + "codeSnippets": [ + "❯ env GITHUB_TOKEN='..' bin/install-pr --k3d 5412\r\n..\r\nINFO[0004] DONE \r\n/home/kevin/Projects/linkerd/linkerd2\r\n\r\nLinkerd CLI available:\r\n/home/kevin/Projects/linkerd/linkerd2/target/release/linkerd2-cli-git-65f0d802-linux-amd64", + "❯ env GITHUB_TOKEN='..' bin/install-pr --k3d 5412\r\n..\r\nINFO[0004] DONE \r\n/home/kevin/Projects/linkerd/linkerd2\r\n\r\nLinkerd CLI available:\r\n/home/kevin/Projects/linkerd/linkerd2/target/release/linkerd2-cli-git-65f0d802-linux-amd64", + "./target/release/linkerd2-cli-git-65f0d802-linux-amd64 install |kubectl apply -f -" + ] + } + }, + "metadata": { + "tags": [ + "linkerd", + "graduated", + "networking", + "troubleshoot" + ], + "cncfProjects": [ + "linkerd" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/linkerd/linkerd2/pull/5412", + "repo": "https://github.com/linkerd/linkerd2", + "pr": "https://github.com/kubestellar/console-kb/pull/6" + }, + "reactions": 6, + "comments": 14, + "synthesizedBy": "regex", + "qualityScore": 64 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with linkerd installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:35:17.203Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/linkerd/linkerd-9113-add-podmonitor-resources-to-the-helm-chart.json b/solutions/cncf-generated/linkerd/linkerd-9113-add-podmonitor-resources-to-the-helm-chart.json new file mode 100644 index 00000000..ae6a5a6d --- /dev/null +++ b/solutions/cncf-generated/linkerd/linkerd-9113-add-podmonitor-resources-to-the-helm-chart.json @@ -0,0 +1,77 @@ +{ + "version": "kc-mission-v1", + "name": "linkerd-9113-add-podmonitor-resources-to-the-helm-chart", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "linkerd: Add PodMonitor resources to the Helm chart", + "description": "Add PodMonitor resources to the Helm chart\n\nWith an external Prometheus setup installed using prometheus-operator the Prometheus instance scraping can be configured using Service/PodMonitor resources.\n\nBy adding PodMonitor resource into Linkerd Helm chart we can mimic the configuration of bundled Prometheus, see https://github.com/linkerd/linkerd2/blob/main/viz/charts/linkerd-viz/templates/prometheus.yaml#L47-L151, that comes with linkerd-viz extension. The PodMonitor resources are based on http", + "type": "deploy", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Add PodMonitor resources to the Helm chart\n\nWith an external Prometheus setup installed using prometheus-operator the Prometheus instance scraping can be configured using Service/PodMonitor resources." + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nSubject\r\n\r\nProblem\r\n\r\nSolution\r\n\r\nValidation\r\n\r\nFixes #[GitHub issue ID]\r\n\r\nDCO Sign off\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Also, if you check the diff after running `go test ./... -update` you'll see `podmonitor.yaml` is introducing an empty line in the output. Can you please check that out? 
It's gotta be something related to the `{{-` `-}}` space-suppressing tags.", + "codeSnippets": [ + "Subject\r\n\r\nProblem\r\n\r\nSolution\r\n\r\nValidation\r\n\r\nFixes #[GitHub issue ID]\r\n\r\nDCO Sign off", + "Subject\r\n\r\nProblem\r\n\r\nSolution\r\n\r\nValidation\r\n\r\nFixes #[GitHub issue ID]\r\n\r\nDCO Sign off", + "Introduce Pull Request Template\r\n\r\nGitHub's community guidelines recommend a pull request template, the repo was\r\nlacking one.\r\n\r\nIntroduce a `PULL_REQUEST_TEMPLATE.md` file.\r\n\r\nOnce merged, the\r\n[Community profile checklist](https://github.com/linkerd/linkerd2/community)\r\nshould indicate the repo now provides a pull request template.\r\n\r\nFixes #3321\r\n\r\nSigned-off-by: Jane Smith " + ] + } + }, + "metadata": { + "tags": [ + "linkerd", + "graduated", + "networking", + "deploy" + ], + "cncfProjects": [ + "linkerd" + ], + "targetResourceKinds": [ + "Pod", + "Service", + "Job" + ], + "difficulty": "beginner", + "issueTypes": [ + "deploy" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/linkerd/linkerd2/pull/9113", + "repo": "https://github.com/linkerd/linkerd2" + }, + "reactions": 4, + "comments": 3, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with linkerd installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:35:23.041Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/metallb/metallb-562-rfc-fix-conflicting-arp-when-ip-is-shared.json b/solutions/cncf-generated/metallb/metallb-562-rfc-fix-conflicting-arp-when-ip-is-shared.json new file mode 100644 index 00000000..d05c6a5e --- /dev/null +++ b/solutions/cncf-generated/metallb/metallb-562-rfc-fix-conflicting-arp-when-ip-is-shared.json @@ -0,0 +1,84 @@ +{ + "version": "kc-mission-v1", + "name": "metallb-562-rfc-fix-conflicting-arp-when-ip-is-shared", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "metallb: [RFC] Fix conflicting ARP when IP is shared", + "description": "This is just a RFC to show the idea. Not tested yet.\n\nWhen IP is shared by two services, it may be annonced from different\nnodes causing conflicting arp responses.\n\nThis commit fixes by using service IP in the hash instead of service\nname so that service sharing the same IP will have the same master.\n\nFor traffic cluster services, all nodes should be usable instead of\nthose running pods, so that services sharing IPs have the same set of\nusable nodes.\n\nFixed #558", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This is just a RFC to show the idea. Not tested yet.\n\nWhen IP is shared by two services, it may be annonced from different\nnodes causing conflicting arp responses.\n\nThis commit fixes by using service " + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\ndocker.io/kvaps/metallb-controller:a3047c4d\r\ndocker.io/kvaps/metallb-speaker:a3047c4d\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/metallb/metallb/pull/665. Review the changes to understand the solution." 
+ }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "As the issue states, even if service specifies `externalTrafficPolicy: Cluster`, MetalLB will not announce if no endpoint reside in the node with speakers. This can occur if speakers are deployed only on a subset of nodes in the cluster.\n\nThis commit makes use of `activeNodes` as a fallback if no usable nodes are available. If memberlist is not enabled, it will act same as if the fallback is disabled.\nThe idea is basically same as #613, with minor code implementation differences.", + "codeSnippets": [ + "docker.io/kvaps/metallb-controller:a3047c4d\r\ndocker.io/kvaps/metallb-speaker:a3047c4d", + "docker.io/kvaps/metallb-controller:a3047c4d\r\ndocker.io/kvaps/metallb-speaker:a3047c4d" + ] + } + }, + "metadata": { + "tags": [ + "metallb", + "sandbox", + "app-definition", + "troubleshoot", + "bug", + "protocol-layer2", + "do-not-merge" + ], + "cncfProjects": [ + "metallb" + ], + "targetResourceKinds": [ + "Pod", + "Service", + "Node" + ], + "difficulty": "advanced", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/metallb/metallb/pull/562", + "repo": "https://github.com/metallb/metallb", + "pr": "https://github.com/metallb/metallb/pull/665" + }, + "reactions": 3, + "comments": 33, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with metallb installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:41:50.185Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/metallb/metallb-658-re-add-helm-chart-under-charts-metallb.json b/solutions/cncf-generated/metallb/metallb-658-re-add-helm-chart-under-charts-metallb.json new file mode 100644 index 00000000..1c803bee --- /dev/null +++ b/solutions/cncf-generated/metallb/metallb-658-re-add-helm-chart-under-charts-metallb.json @@ -0,0 +1,81 @@ +{ + "version": "kc-mission-v1", + "name": "metallb-658-re-add-helm-chart-under-charts-metallb", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "metallb: Re-add helm chart under charts/metallb", + "description": "Create a new helm chart integrating features of the previous chart, as\nwell as stable/metallb and bitnami/metallb.\n\nFeatures:\n* Support new environment variables in speaker\n* Use PodMonitor instead of ServiceMonitor (no need to create Services)\n* Create PrometheusRule to detect stale config and config-not-loaded\n* Configurable PrometheusRule alerts for address pool exhaustion\n* Support MetalLB controller creating memberlist secret\n* Standardize labels using template helper\n* Create config-watche", + "type": "deploy", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Create a new helm chart integrating features of the previous chart, as\nwell as stable/metallb and bitnami/metallb.\n\nFeatures:\n* Support new environment variables in speaker\n* Use PodMonitor instead of" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nset -ex\r\n\r\n # install openssl and kubectl\r\n echo 
\"http://dl-cdn.alpinelinux.org/alpine/edge/testing\" >> /etc/apk/repositories\r\n apk --no-cache add openssl kubectl\r\n\r\n # generate a random string with no newlines\r\n # store in file secret does not leak to logs\r\n openssl rand -base64 128 > /tmp/secret\r\n\r\n # create secret\r\n kubectl \\\r\n create secret generic metallb-memberlist \\\r\n --from-f\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "The one remaining concern I have is related to the `secretgen` Job to generate the memberlist secret. As it is right now, it will not work in air-gapped clusters because it runs a standard `alpine` image and installs `openssl`/`kubectl` on the fly:\n```\n set -ex\n\n # install openssl and kubectl\n echo \"http://dl-cdn.alpinelinux.org/alpine/edge/testing\" >> /etc/apk/repositories\n apk --no-cache add openssl kubectl\n\n # generate a random string with no newlines\n # store in file secret does not leak to logs\n openssl rand -base64 128 > /tmp/secret\n\n # create secret\n kubectl \\\n create secret generic metallb-memberlist \\\n --from-file=secretkey=/tmp/secret\n```\n\nI can overcome this by adding a new image called `metallb/secretgen`, but that'd be more invasive to the `tasks.py`, it would involve creating new tasks for building an image without a metallb binary in it (all the current tasks assume a go binary is built with each image).\n\nUltimately I think we want the secret generation to be handled by the controller, but we may still want a Job to clean up the secret on uninstall of the chart, so an extra image may be warranted regardless.", + "codeSnippets": [ + "set -ex\r\n\r\n # install openssl and kubectl\r\n echo \"http://dl-cdn.alpinelinux.org/alpine/edge/testing\" >> /etc/apk/repositories\r\n apk --no-cache add openssl kubectl\r\n\r\n # generate a random string with no newlines\r\n # store in file secret does not leak to logs\r\n openssl rand -base64 128 > /tmp/secret\r\n\r\n # create secret\r\n kubectl \\\r\n create secret generic metallb-memberlist \\\r\n --from-f", + "set -ex\r\n\r\n # install openssl and kubectl\r\n echo \"http://dl-cdn.alpinelinux.org/alpine/edge/testing\" >> /etc/apk/repositories\r\n apk --no-cache add openssl kubectl\r\n\r\n # generate a random string with no newlines\r\n # store in file secret does not leak to logs\r\n openssl rand -base64 128 > /tmp/secret\r\n\r\n # create secret\r\n kubectl \\\r\n create secret generic metallb-memberlist \\\r\n --from-file=secretkey=/tmp/secret", + "# helm install metallb --namespace metallb-system metallb --version 0.9.4 --set existingConfigMap=config\r\nError: unable to build kubernetes objects from release manifest: error validating \"\": error validating data: [ValidationError(Role.rules[0]): unknown field \"resources:\" in io.k8s.api.rbac.v1.PolicyRule, ValidationError(Role.rules[1]): unknown field \"resources:\" in io.k8s.api.rbac.v1.PolicyRule]", + "# helm install metallb --namespace metallb-system bitnami/metallb --version 0.1.28 --set existingConfigMap=config\r\n...\r\n# helm upgrade metallb --namespace metallb-system metallb --version 0.9.4 --set existingConfigMap=config\r\nRelease \"metallb\" has been upgraded. 
Happy Helming!\r\nNAME: metallb\r\nLAST DEPLOYED: Sat Oct 24 21:11:26 2020\r\nNAMESPACE: metallb-system\r\nSTATUS: deployed\r\nREVISION: 2", + "NOTES:\r\nMetalLB is now running in the cluster.\r\nLoadBalancer Services in your cluster are now available on the IPs you\r\ndefined in MetalLB's configuration:\r\n\r\nnull\r\n\r\nTo see IP assignments, try `kubectl get services`." + ] + } + }, + "metadata": { + "tags": [ + "metallb", + "sandbox", + "app-definition", + "deploy", + "enhancement" + ], + "cncfProjects": [ + "metallb" + ], + "targetResourceKinds": [ + "Pod", + "Service", + "Secret", + "Role" + ], + "difficulty": "intermediate", + "issueTypes": [ + "deploy" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/metallb/metallb/pull/658", + "repo": "https://github.com/metallb/metallb" + }, + "reactions": 22, + "comments": 31, + "synthesizedBy": "regex", + "qualityScore": 73 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with metallb installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:41:47.384Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/metallb/metallb-833-design-metallb-crd-controller-enhancement.json b/solutions/cncf-generated/metallb/metallb-833-design-metallb-crd-controller-enhancement.json new file mode 100644 index 00000000..6abd4653 --- /dev/null +++ b/solutions/cncf-generated/metallb/metallb-833-design-metallb-crd-controller-enhancement.json @@ -0,0 +1,83 @@ +{ + "version": "kc-mission-v1", + "name": "metallb-833-design-metallb-crd-controller-enhancement", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "metallb: design: MetalLB CRD controller enhancement", + "description": "This design document discusses implementing MetalLB Custom Resource definition\"CRD\"\nas a mechanism to configure MetalLB's layer2 and BGP features instead of using ConfigMap.\n\nran mdl against the new md file\n```\nmdl 0001-metallb-crd.md \n```\n\nFixes https://github.com/metallb/metallb/issues/196.", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This design document discusses implementing MetalLB Custom Resource definition\"CRD\"\nas a mechanism to configure MetalLB's layer2 and BGP features instead of using ConfigMap.\n\nran mdl against the new m" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nmdl 0001-metallb-crd.md\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/metallb/metallb/pull/593. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "## General\n\nThis PR adds a new feature to MetalLB called **peer autodiscovery**. This feature allows MetalLB to create BGP peers by discovering their configuration from annotations and labels on `Node` Kubernetes objects. The rationale and use case for this is explained in https://github.com/metallb/metallb/issues/529.\n\nIn addition to the main functionality, this PR adds a **status endpoint** to the speaker. 
This is an HTTP endpoint which allows querying the internal state of a speaker. The endpoint is available per protocol (BGP/Layer 2) at the following URI: `http://:7473/status/`.\n\nWhile the endpoint is added generically at the \"protocol\" level, this PR implements a handler for BGP only.\n\nDocumentation as well as sample configuration for this feature are included with this PR.\n\nDiscovered peers (or \"node peers\") can coexist with \"regular\" peers - the user is free to use peer autodiscovery alone, in conjunction with static peer configuration or not use autodiscovery at all. The implementation ensures existing functionality isn't broken and filters duplicate peers in case a node peer that's identical to a static peer is discovered.\n\nFinally, the user has full flexibility in determining exactly which BGP parameters should be discovered automatically as well as which annotations/labels to use for determining the values. The implementation is generic and doesn't expect a specific annotation/label format so as not to couple MetalLB to a specific i", + "codeSnippets": [ + "mdl 0001-metallb-crd.md", + "mdl 0001-metallb-crd.md", + "git clone git@github.com:kinvolk/metallb.git\r\ncd metallb\r\ngit checkout johananl/peer-autodiscovery\r\n\r\n# Run the unit tests\r\ninv test\r\n\r\n# Deploy MetalLB to a local cluster\r\ninv dev-env -p bgp", + "kubectl -n metallb-system get pods", + "NAME READY STATUS RESTARTS AGE\r\ncontroller-5d8c5d4bb4-kgx5k 1/1 Running 0 2m33s\r\nspeaker-g564r 1/1 Running 0 2m33s\r\nspeaker-m6flp 1/1 Running 0 2m6s\r\nspeaker-tkb8v 1/1 Running 0 2m6s" + ] + } + }, + "metadata": { + "tags": [ + "metallb", + "sandbox", + "app-definition", + "troubleshoot", + "design-proposal" + ], + "cncfProjects": [ + "metallb" + ], + "targetResourceKinds": [ + "Configmap" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/metallb/metallb/pull/833", + "repo": "https://github.com/metallb/metallb", + "pr": "https://github.com/metallb/metallb/pull/593" + }, + "reactions": 3, + "comments": 13, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with metallb installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:41:52.766Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/metallb/metallb-895-implement-leveled-logging.json b/solutions/cncf-generated/metallb/metallb-895-implement-leveled-logging.json new file mode 100644 index 00000000..bb100e56 --- /dev/null +++ b/solutions/cncf-generated/metallb/metallb-895-implement-leveled-logging.json @@ -0,0 +1,94 @@ +{ + "version": "kc-mission-v1", + "name": "metallb-895-implement-leveled-logging", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "metallb: Implement leveled logging", + "description": "This PR implements leveled logging. I set the levels as I've seen fit by looking at their messages - any feedbacks are appreciated.", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This PR implements leveled logging. I set the levels as I've seen fit by looking at their messages - any feedbacks are appreciated." 
+ }, + { + "title": "Stop using Memberlist.Members() as it's racy.", + "description": "Stop using Memberlist.Members() as it's racy." + }, + { + "title": "Remove the direct dependency between SpeakerList and k8s.Client.", + "description": "Remove the direct dependency between SpeakerList and k8s.Client." + }, + { + "title": "Stop polling the API and use watches.", + "description": "Stop polling the API and use watches." + }, + { + "title": "Only consider ready speakers when memberlist is disabled.", + "description": "Only consider ready speakers when memberlist is disabled." + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n$ inv dev-env -p layer2\r\nkind version\r\nkind v0.10.0 go1.15.7 linux/amd64\r\ngo build -v -o build/amd64/controller/controller -ldflags '-X go.universe.tf/metallb/internal/version.gitCommit=25e1592a -X go.universe.tf/metallb/internal/version.gitBranch=pr-895' go.universe.tf/metallb/controller\r\ngo.universe.tf/metallb/internal/logging\r\n# go.universe.tf/metallb/internal/logging\r\ninternal/logging/logging.go:56:14: level.NewFilter undefined (type string has no field or method NewFilter)\r\ninternal/logging\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/metallb/metallb/pull/595. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "## General\n\nThis was a fix for #589. Now it's just the end of the SpeakerList refactor.\n\nAt the beginning, this PR was just a full rework of memberlist and bits of layer2. A part of it was merged along the way and it's still big in part because it undoes https://github.com/metallb/metallb/pull/662/commits/cf494a91b267626753530c47ae482250ae1886eb.\n\nIt does multiple cleanups / fixes:\n\n1) Stop using Memberlist.Members() as it's racy.\n2) Remove the direct dependency between SpeakerList and k8s.Client.\n3) Stop polling the API and use watches.\n4) Only consider ready speakers when memberlist is disabled.\n\nTo achieve no. 
3, there is one user-facing change: We introduce a headless speaker service.\n\n## TODO", + "codeSnippets": [ + "$ inv dev-env -p layer2\r\nkind version\r\nkind v0.10.0 go1.15.7 linux/amd64\r\ngo build -v -o build/amd64/controller/controller -ldflags '-X go.universe.tf/metallb/internal/version.gitCommit=25e1592a -X go.universe.tf/metallb/internal/version.gitBranch=pr-895' go.universe.tf/metallb/controller\r\ngo.universe.tf/metallb/internal/logging\r\n# go.universe.tf/metallb/internal/logging\r\ninternal/logging/logging.go:56:14: level.NewFilter undefined (type string has no field or method NewFilter)\r\ninternal/logging", + "$ inv dev-env -p layer2\r\nkind version\r\nkind v0.10.0 go1.15.7 linux/amd64\r\ngo build -v -o build/amd64/controller/controller -ldflags '-X go.universe.tf/metallb/internal/version.gitCommit=25e1592a -X go.universe.tf/metallb/internal/version.gitBranch=pr-895' go.universe.tf/metallb/controller\r\ngo.universe.tf/metallb/internal/logging\r\n# go.universe.tf/metallb/internal/logging\r\ninternal/logging/logging.go:56:14: level.NewFilter undefined (type string has no field or method NewFilter)\r\ninternal/logging/logging.go:147:15: level.AllowAll undefined (type string has no field or method AllowAll)\r\ninternal/logging/logging.go:149:15: level.AllowDebug undefined (type string has no field or method AllowDebug)\r\ninternal/logging/logging.go:151:15: level.AllowInfo undefined (type string has no field or method AllowInfo)\r\ninternal/logging/logging.go:153:15: level.AllowWarn undefined (type string has no field or method AllowWarn)\r\ninternal/logging/logging.go:155:15: level.AllowError undefined (type string has no field or method AllowError)\r\ninternal/logging/logging.go:157:15: level.AllowNone undefined (type string has no field or method AllowNone)", + "$ go version\r\ngo version go1.16.4 linux/amd64" + ] + } + }, + "metadata": { + "tags": [ + "metallb", + "sandbox", + "app-definition", + "troubleshoot" + ], + "cncfProjects": [ + "metallb" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/metallb/metallb/pull/895", + "repo": "https://github.com/metallb/metallb", + "pr": "https://github.com/metallb/metallb/pull/595" + }, + "reactions": 2, + "comments": 26, + "synthesizedBy": "regex", + "qualityScore": 69 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with metallb installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:41:56.243Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/notary-project/notary-project-916-feat-upgrade-to-oci-1-1.json b/solutions/cncf-generated/notary-project/notary-project-916-feat-upgrade-to-oci-1-1.json new file mode 100644 index 00000000..59ca4472 --- /dev/null +++ b/solutions/cncf-generated/notary-project/notary-project-916-feat-upgrade-to-oci-1-1.json @@ -0,0 +1,75 @@ +{ + "version": "kc-mission-v1", + "name": "notary-project-916-feat-upgrade-to-oci-1-1", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "notary-project: feat: upgrade to OCI 1.1", + "description": "This PR upgrades Notation to OCI 1.1.\n\nMajor changes **[UPDATE as of 4/2/2024 after community meeting]**:\n1. New flag `--force-referrers-tag` is introduced. 
And it is only applied to the `Sign` command. It's default to `true`, and it's NOT an `experimental` flag. The original `experimental` flag `--allow-referrers-api` will be hidden, i.e., description and example will be hidden from help page. It is kept only for backwards compatibility purpose, and a warning will be printed out when user sets ", + "type": "upgrade", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This PR upgrades Notation to OCI 1.1.\n\nMajor changes **[UPDATE as of 4/2/2024 after community meeting]**:\n1. New flag `--force-referrers-tag` is introduced. And it is only applied to the `Sign` comman" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n# Default behavior: Use the referrers tag schema for backwards compatibility.\r\n notation sign ...\r\n notation sign --force-referrers-tag ...\r\n \r\n # With `--force-referrers-tag=false`: Check the Referrers API first, if not supported, automatically fallback to the referrers tag schema.\r\n notation sign --force-referrers-tag=false ...\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "I am kind of @shizhMSFT 's proposal, i.e., \n\n- `--force-referrers-tag` and its default value is true: \nFrom end users' point of view, they care about how signature can be stored in the registry. Apparently, the signature will be pushed and stored as a \"sha256-xxxxx\" tag in the registry with the default `--force-referrers-tag=true`. So `--force-referrers-tag` seems much more straightforward for end users to understand how the signature is stored in the registry. \n- Keep `--allow-referrers-api` as an alias: \nFor some popular registries that already supported OCI v1.1 and integrated Notation, such as [Harbor](https://goharbor.io/docs/2.9.0/working-with-projects/working-with-images/sign-images/#use-notationexperimental-to-sign-artifacts-with-distribution-spec-v11-mode) and Zot, `--allow-referrers-api` has been referenced in their documentation. Keep the original flag name will be friendly and non-breaking for those.\n\nFor @sudo-bmitch and @priteshbandi 's proposals above, `--force-1-1-compatibility` and `force-oci-1-1` sound a bit vague and technical because most of end users are not aware of the OCI spec and different versions. In general, users care about which signature format they can use to store the signature in registry.", + "codeSnippets": [ + "# Default behavior: Use the referrers tag schema for backwards compatibility.\r\n notation sign ...\r\n notation sign --force-referrers-tag ...\r\n \r\n # With `--force-referrers-tag=false`: Check the Referrers API first, if not supported, automatically fallback to the referrers tag schema.\r\n notation sign --force-referrers-tag=false ...", + "# Default behavior: Use the referrers tag schema for backwards compatibility.\r\n notation sign ...\r\n notation sign --force-referrers-tag ...\r\n \r\n # With `--force-referrers-tag=false`: Check the Referrers API first, if not supported, automatically fallback to the referrers tag schema.\r\n notation sign --force-referrers-tag=false ...", + "
\n\n[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/notaryproject/notation/pull/916?dropdown=coverage&src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=notaryproject). \n:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=notaryproject).\n\nThis may be too late to change, but if not, I'd suggest renaming the flag to something more clear to end users:", + "Default the value to true for now, and on a 2.x release, change the default to false.\r\n\r\nFrom an outside view, `--allow-referrers-api` is confusing since the signing operation isn't calling the API and the client has no say in whether the registry will include the content in the referrers response, that's automatic based on the `subject` field.\r\n\r\nEdit: note this is a focus on the notation version, rather than the OCI version, since users would more likely know the version of their notation clients while they don't necessarily know the version of their registry.\n> This may be too late to change, but if not, I'd suggest renaming the flag to something more clear to end users:\r\n> \r\n>", + "> \r\n> Default the value to true for now, and on a 2.x release, change the default to false.\r\n> \r\n> From an outside view, `--allow-referrers-api` is confusing since the signing operation isn't calling the API and the client has no say in whether the registry will include the content in the referrers response, that's automatic based on the `subject` field.\r\n\r\n+1, can we add `--force-oci-1-1` and make `--allow-referrers-api` its alias ? \nI'm actually seeing two suggestions here:\r\n1. Suggested by @sudo-bmitch : `--force-1-1-compatibility`, here `1-1` means notation 1.1.0 and before. This flag is default to `true`. In this case:" + ] + } + }, + "metadata": { + "tags": [ + "notary-project", + "incubating", + "security", + "upgrade" + ], + "cncfProjects": [ + "notary-project" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "upgrade" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/notaryproject/notation/pull/916", + "repo": "https://github.com/notaryproject/notation" + }, + "reactions": 2, + "comments": 13, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with notary-project installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:37:35.914Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-1500-implementation-of-multiple-providers.json b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-1500-implementation-of-multiple-providers.json new file mode 100644 index 00000000..4896371f --- /dev/null +++ b/solutions/cncf-generated/oauth2-proxy/oauth2-proxy-1500-implementation-of-multiple-providers.json @@ -0,0 +1,78 @@ +{ + "version": "kc-mission-v1", + "name": "oauth2-proxy-1500-implementation-of-multiple-providers", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "oauth2-proxy: Implementation of Multiple Providers", + "description": "* Added BUILDPLATFORM arg to dockerfile in order to pass arg from docker-compose build commands\n* Updated make file to support a multiple provider setup\n* Updated docker compose to support build + up\n* updated alpha configs to actually work\n* Updated sign_in template to support multiple providers\n* Updated validation package to loop over the provider array and validate all providers\n* Added Provider ID to Providerdata struct\n* Added Provider ID to the state so callback knows which provider to us", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "* Added BUILDPLATFORM arg to dockerfile in order to pass arg from docker-compose build commands\n* Updated make file to support a multiple provider setup\n* Updated docker compose to support build + up\n" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nI still have the following point to look at - but will open another PR for this point.\r\n\r\n* Currently combining all certificates for providers if offered - I think each provider needs its own http client so we have flexibility to handle combinations of providers with different settings (i.e. 1 provider has custom cert but another does not) - this will be a big task so i will probably create another PR\r\n\r\n\r\n## Description\r\n\r\n* Added BUILDPLATFORM arg to dockerfile in order to pass arg from docker\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/oauth2-proxy/oauth2-proxy/pull/1418. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "## Description, Motivation and Context\n\nThis is a possible implementation of the facility discussed in #1314 to make it possible to pass through query parameters from `/oauth2/start` to the Identity Provider login URL in a secure and controlled way. Motivating use cases include\n\n - wanting most requests to use `prompt=login` by default, but with the possibility to override this to `prompt=select_account` or `prompt=consent` in certain specific cases\n - passing a `login_hint` when you already know the user's email address (#1369)\n - passing additional non-standard parameters required by certain providers, e.g. 
`organization` when using auth0 (https://github.com/oauth2-proxy/oauth2-proxy/issues/1314#issuecomment-950273012)\n\nI've introduced a generic mechanism whereby an oauth2-proxy user can configure `loginURLParameters` in the \"alpha\" YAML config to define the parameters that may be passed through from `/oauth2/start` to the IdP. This is a map from the query parameter name to a set of configuration options:\n\n```yaml\nloginURLParameters:\n prompt:\n default: [\"login\"]\n allowed: [\"select_account\", \"consent\"]\n login_hint:\n allowAny: true\n organization:\n default: [\"org1\"]\n```\n\nThe interaction between these options is not entirely intuitive, but the logic is intended to work as follows:\n\n- only URL parameters named under `loginURLParameters` are candidates to be passed through from `/oauth2/start?...` to the IdP, any other parameters on the `/oauth2/start` URL will b", + "codeSnippets": [ + "I still have the following point to look at - but will open another PR for this point.\r\n\r\n* Currently combining all certificates for providers if offered - I think each provider needs its own http client so we have flexibility to handle combinations of providers with different settings (i.e. 1 provider has custom cert but another does not) - this will be a big task so i will probably create another PR\r\n\r\n\r\n## Description\r\n\r\n* Added BUILDPLATFORM arg to dockerfile in order to pass arg from docker", + "I still have the following point to look at - but will open another PR for this point.\r\n\r\n* Currently combining all certificates for providers if offered - I think each provider needs its own http client so we have flexibility to handle combinations of providers with different settings (i.e. 1 provider has custom cert but another does not) - this will be a big task so i will probably create another PR\r\n\r\n\r\n## Description\r\n\r\n* Added BUILDPLATFORM arg to dockerfile in order to pass arg from docker-compose build commands\r\n* Updated make file to support a multiple provider setup\r\n* Updated docker compose to support build + up\r\n* updated alpha configs to actually work\r\n* Updated sign_in template to support multiple providers\r\n* Updated validation package to loop over the provider array and validate all providers\r\n* Added Provider ID to Providerdata struct\r\n* Added Provider ID to the state so callback knows which provider to use\r\n* Added dynamic oauth2/{id}/start routes for each provider ( /oauth2/start uses [0] provider still )\r\n* Converted providers.Provider interface to a slice in oauth struct.\r\n* Updated unit tests to support provider struct.\r\n* Return 404/400 if a /oauth2/{id}/start request does not match a providerid\r\n* Added validation to check for a provider ID and providerName in each provider - will shutdown if both do not exist.\r\n* Added Cookie refresh logic to select the correct provider\r\n* Update documentation for multiple provider config\r\n\r\n## Motivation and Context\r\n\r\n Important for environments with multiple companies etc\r\n https://github.com/oauth2-proxy/oauth2-proxy/issues/926 Closes #926\r\n\r\n## How Has This Been Tested?\r\n\r\n\r\ntested locally with Keycloak and Dex as multiple providers\r\n\r\n\r\n* Tested locally via docker compose using keycloak and dex as providers\r\n* Still must check new session setup with redis.\r\n* Will test this branch version on our kubernetes feature branch sandboxes over the next month - first with 1 provider to ensure no functionality is lost, then with mutiple afterwards\r\n* 
Tests still need to be updated \r\n\r\nWill test with Azure provider in our work environment.\r\n\r\n\r\n\r\nCurrently it should not affect anything else other than breaking the sign-in-message override, as it can not support multiple providers.\r\n\r\n## Checklist:\r\n\r\n\r\n\r\n\r\n- [x] My change requires a change to the documentation or CHANGELOG.\r\n- [x] I have updated the documentation/CHANGELOG accordingly.\r\n- [x] I have created a feature (non-master) branch for my PR.\r\n\n\r\n\r\n## Description, Motivation and Context\r\n\r\nThis is a possible implementation of the facility discussed in #1314 to make it possible to pass through query parameters from `/oauth2/start` to the Identity Provider login URL in a secure and controlled way. Motivating use cases include\r\n\r\n - wanting most requests to use `prompt=login` by default, but with the possibility to override this to `prompt=select_account` or `prompt=consent` in certain specific cases\r\n - passing a `login_hint` when you already know the user's email address (#1369)\r\n - passing additional non-standard parameters required by certain providers, e.g. `organization` when using auth0 (https://github.com/oauth2-proxy/oauth2-proxy/issues/1314#issuecomment-950273012)\r\n\r\nI've introduced a generic mechanism whereby an oauth2-proxy user can configure `loginURLParameters` in the \"alpha\" YAML config to define the parameters that may be passed through from `/oauth2/start` to the IdP. This is a map from the query parameter name to a set of configuration options:" + ] + } + }, + "metadata": { + "tags": [ + "oauth2-proxy", + "sandbox", + "app-definition", + "troubleshoot", + "enhancement" + ], + "cncfProjects": [ + "oauth2-proxy" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/oauth2-proxy/oauth2-proxy/pull/1500", + "repo": "https://github.com/oauth2-proxy/oauth2-proxy", + "pr": "https://github.com/oauth2-proxy/oauth2-proxy/pull/1418" + }, + "reactions": 11, + "comments": 4, + "synthesizedBy": "regex", + "qualityScore": 60 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with oauth2-proxy installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:42:12.726Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/open-policy-agent-opa-/open-policy-agent-opa-4029-implement-distributed-tracing-using-opentelemtry.json b/solutions/cncf-generated/open-policy-agent-opa-/open-policy-agent-opa-4029-implement-distributed-tracing-using-opentelemtry.json new file mode 100644 index 00000000..d0e5164f --- /dev/null +++ b/solutions/cncf-generated/open-policy-agent-opa-/open-policy-agent-opa-4029-implement-distributed-tracing-using-opentelemtry.json @@ -0,0 +1,86 @@ +{ + "version": "kc-mission-v1", + "name": "open-policy-agent-opa-4029-implement-distributed-tracing-using-opentelemtry", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "open-policy-agent-opa-: Implement Distributed tracing using OpenTelemtry", + "description": "This commit implements tracing using the net/http [automatic instrumentation wrappers](https://opentelemetry.io/docs/go/instrumentation/#automatic-instrumentation) on the server and topdown/http packages.\n\nFollowing configuration flags are added:\n --distributed-tracing enable distributed tracing using OpenTelemetry Tracing\n --distributed-tracing-address string address of the OpenTelemetry Collector gRPC endpoint (default \"localhost:4317\")\n --distribute", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This commit implements tracing using the net/http [automatic instrumentation wrappers](https://opentelemetry.io/docs/go/instrumentation/#automatic-instrumentation) on the server and topdown/http packa" + }, + { + "title": "These attributes aren't available for review at the `/v1/config` endpoint.", + "description": "These attributes aren't available for review at the `/v1/config` endpoint." + }, + { + "title": "They can't be set/modified by discovery bundles.", + "description": "They can't be set/modified by discovery bundles." + }, + { + "title": "They can't be set programmatically / by the SDK (not sure if applicable here ...", + "description": "They can't be set programmatically / by the SDK (not sure if applicable here though)." + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n{\"client_addr\":\"127.0.0.1:53744\",\"level\":\"info\",\"msg\":\"Received request.\",\"req_id\":12,\"req_method\":\"GET\",\"req_path\":\"/health\",\"time\":\"2021-12-10T11:17:56+01:00\"}\r\n{\"client_addr\":\"127.0.0.1:53744\",\"level\":\"info\",\"msg\":\"Sent response.\",\"req_id\":12,\"req_method\":\"GET\",\"req_path\":\"/health\",\"resp_bytes\":2,\"resp_duration\":0.280679,\"resp_status\":200,\"time\":\"2021-12-10T11:17:56+01:00\"}\r\n{\"level\":\"error\",\"msg\":\"Distributed tracing: context deadline exceeded\",\"time\":\"2021-12-10T11:18:04+01:00\"}\r\n{\"level\":\"\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Love this! 👍 \n\nOne thing I'm not too sure about though is all the new arguments added to `opa run`. Not in the sense that they aren't necessary, but I wonder if it'd be a good idea to instead have them added to the OPA configuration? 
In addition to adding clutter to the `opa run --help` output, command line arguments have some disadvantages over config, including:\n\n1. These attributes aren't available for review at the `/v1/config` endpoint.\n2. They can't be set/modified by discovery bundles.\n3. They can't be set programmatically / by the SDK (not sure if applicable here though).\n\nWe've already seen this being problematic [elsewhere](https://github.com/open-policy-agent/opa/issues/3980), so perhaps this would be a good candidate for config rather than CLI arguments? Note that it's still possible to set config attributes through the CLI with `--set config=value` if one prefers that.\n\nThoughts, @srenatus @tsandall ?", + "codeSnippets": [ + "{\"client_addr\":\"127.0.0.1:53744\",\"level\":\"info\",\"msg\":\"Received request.\",\"req_id\":12,\"req_method\":\"GET\",\"req_path\":\"/health\",\"time\":\"2021-12-10T11:17:56+01:00\"}\r\n{\"client_addr\":\"127.0.0.1:53744\",\"level\":\"info\",\"msg\":\"Sent response.\",\"req_id\":12,\"req_method\":\"GET\",\"req_path\":\"/health\",\"resp_bytes\":2,\"resp_duration\":0.280679,\"resp_status\":200,\"time\":\"2021-12-10T11:17:56+01:00\"}\r\n{\"level\":\"error\",\"msg\":\"Distributed tracing: context deadline exceeded\",\"time\":\"2021-12-10T11:18:04+01:00\"}\r\n{\"level\":\"", + "{\"client_addr\":\"127.0.0.1:53744\",\"level\":\"info\",\"msg\":\"Received request.\",\"req_id\":12,\"req_method\":\"GET\",\"req_path\":\"/health\",\"time\":\"2021-12-10T11:17:56+01:00\"}\r\n{\"client_addr\":\"127.0.0.1:53744\",\"level\":\"info\",\"msg\":\"Sent response.\",\"req_id\":12,\"req_method\":\"GET\",\"req_path\":\"/health\",\"resp_bytes\":2,\"resp_duration\":0.280679,\"resp_status\":200,\"time\":\"2021-12-10T11:17:56+01:00\"}\r\n{\"level\":\"error\",\"msg\":\"Distributed tracing: context deadline exceeded\",\"time\":\"2021-12-10T11:18:04+01:00\"}\r\n{\"level\":\"error\",\"msg\":\"Distributed tracing: max retry time elapsed: rpc error: code = Unavailable desc = connection error: desc = \\\"transport: Error while dialing dial tcp 127.0.0.1:4317: connect: connection refused\\\"\",\"time\":\"2021-12-10T11:18:09+01:00\"}" + ] + } + }, + "metadata": { + "tags": [ + "open-policy-agent-opa-", + "graduated", + "security", + "troubleshoot" + ], + "cncfProjects": [ + "open-policy-agent-opa-" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/open-policy-agent/opa/pull/4029", + "repo": "https://github.com/open-policy-agent/opa" + }, + "reactions": 3, + "comments": 20, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with open-policy-agent-opa- installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:36:07.291Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/open-policy-agent-opa-/open-policy-agent-opa-4254-ci-publish-multi-arch-image-manifest-lists.json b/solutions/cncf-generated/open-policy-agent-opa-/open-policy-agent-opa-4254-ci-publish-multi-arch-image-manifest-lists.json new file mode 100644 index 00000000..e777c682 --- /dev/null +++ b/solutions/cncf-generated/open-policy-agent-opa-/open-policy-agent-opa-4254-ci-publish-multi-arch-image-manifest-lists.json @@ -0,0 +1,78 @@ +{ + "version": "kc-mission-v1", + "name": "open-policy-agent-opa-4254-ci-publish-multi-arch-image-manifest-lists", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "open-policy-agent-opa-: ci: publish multi-arch image manifest lists", + "description": "This change adds linux/arm64 binaries to the release. It also publishes an arm64 container image for all variants (standard, debug, rootless, static) and releases (dev, edge, latest).\n\nThe build and push process uses buildx to push the individual images by digest (i.e. untagged) and reference them in a single, tagged manifest list. This avoids cluttering Docker Hub's tag list with `-` tags.", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This change adds linux/arm64 binaries to the release. It also publishes an arm64 container image for all variants (standard, debug, rootless, static) and releases (dev, edge, latest).\n\nThe build and p" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nArchive: binaries.zip\r\n creating: 0.37.0-dev/\r\n inflating: 0.37.0-dev/opa_darwin_amd64.sha256\r\n inflating: 0.37.0-dev/opa_darwin_amd64\r\n inflating: 0.37.0-dev/opa_darwin_arm64_static.sha256\r\n inflating: 0.37.0-dev/opa_darwin_arm64_static\r\n inflating: 0.37.0-dev/opa_linux_amd64.sha256\r\n inflating: 0.37.0-dev/opa_linux_amd64\r\n inflating: 0.37.0-dev/opa_linux_amd64_static.sha256\r\n inflating: 0.37.0-dev/opa_linux_amd64_static\r\n inflating: 0.37.0-dev/opa_linux_arm64_static.sha256\r\n inflat\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/open-policy-agent/opa/pull/4282. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "With this change, we're dialling back #4254 a little when it comes to linux/arm64:\n\n- The built binaries only include a static linux_arm64:\n\n ```\n Archive: binaries.zip\n creating: 0.37.0-dev/\n inflating: 0.37.0-dev/opa_darwin_amd64.sha256\n inflating: 0.37.0-dev/opa_darwin_amd64\n inflating: 0.37.0-dev/opa_darwin_arm64_static.sha256\n inflating: 0.37.0-dev/opa_darwin_arm64_static\n inflating: 0.37.0-dev/opa_linux_amd64.sha256\n inflating: 0.37.0-dev/opa_linux_amd64\n inflating: 0.37.0-dev/opa_linux_amd64_static.sha256\n inflating: 0.37.0-dev/opa_linux_amd64_static\n inflating: 0.37.0-dev/opa_linux_arm64_static.sha256\n inflating: 0.37.0-dev/opa_linux_arm64_static\n inflating: 0.37.0-dev/opa_windows_amd64.exe.sha256\n inflating: 0.37.0-dev/opa_windows_amd64.exe\n ```\n- The docker manifests for the non-static tags (version, version-debug, version-rootless) only contain the linux/amd64 platforms\n- The docker manifest for the static tag (version-static) contains both linux/amd64 and linux/arm64\n- Smoke tests for our docker images only test the images we build", + "codeSnippets": [ + "Archive: binaries.zip\r\n creating: 0.37.0-dev/\r\n inflating: 0.37.0-dev/opa_darwin_amd64.sha256\r\n inflating: 0.37.0-dev/opa_darwin_amd64\r\n inflating: 0.37.0-dev/opa_darwin_arm64_static.sha256\r\n inflating: 0.37.0-dev/opa_darwin_arm64_static\r\n inflating: 0.37.0-dev/opa_linux_amd64.sha256\r\n inflating: 0.37.0-dev/opa_linux_amd64\r\n inflating: 0.37.0-dev/opa_linux_amd64_static.sha256\r\n inflating: 0.37.0-dev/opa_linux_amd64_static\r\n inflating: 0.37.0-dev/opa_linux_arm64_static.sha256\r\n inflat", + "Archive: binaries.zip\r\n creating: 0.37.0-dev/\r\n inflating: 0.37.0-dev/opa_darwin_amd64.sha256\r\n inflating: 0.37.0-dev/opa_darwin_amd64\r\n inflating: 0.37.0-dev/opa_darwin_arm64_static.sha256\r\n inflating: 0.37.0-dev/opa_darwin_arm64_static\r\n inflating: 0.37.0-dev/opa_linux_amd64.sha256\r\n inflating: 0.37.0-dev/opa_linux_amd64\r\n inflating: 0.37.0-dev/opa_linux_amd64_static.sha256\r\n inflating: 0.37.0-dev/opa_linux_amd64_static\r\n inflating: 0.37.0-dev/opa_linux_arm64_static.sha256\r\n inflating: 0.37.0-dev/opa_linux_arm64_static\r\n inflating: 0.37.0-dev/opa_windows_amd64.exe.sha256\r\n inflating: 0.37.0-dev/opa_windows_amd64.exe", + "WARNING: The requested image's platform (linux/amd64) does not match the detected host platform (linux/arm64/v8) and no specific platform was requested" + ] + } + }, + "metadata": { + "tags": [ + "open-policy-agent-opa-", + "graduated", + "security", + "troubleshoot" + ], + "cncfProjects": [ + "open-policy-agent-opa-" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/open-policy-agent/opa/pull/4254", + "repo": "https://github.com/open-policy-agent/opa", + "pr": "https://github.com/open-policy-agent/opa/pull/4282" + }, + "reactions": 8, + "comments": 10, + "synthesizedBy": "regex", + "qualityScore": 62 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with open-policy-agent-opa- installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:36:00.897Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/opencost/opencost-2423-use-node-label-instead-of-extracting-from-instance.json b/solutions/cncf-generated/opencost/opencost-2423-use-node-label-instead-of-extracting-from-instance.json new file mode 100644 index 00000000..ae722977 --- /dev/null +++ b/solutions/cncf-generated/opencost/opencost-2423-use-node-label-instead-of-extracting-from-instance.json @@ -0,0 +1,88 @@ +{ + "version": "kc-mission-v1", + "name": "opencost-2423-use-node-label-instead-of-extracting-from-instance", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "opencost: Use node label instead of extracting from instance", + "description": "Given a common cAdvisor setup the instance label will refer to the node by IP rather than hostname\nThis uses the `node` label instead, relying on a setup where that label is properly configured rather than an unexpected `instance` setup.\n\n## What does this PR change?\n* Switches the querying to the `node` label instead of relying on users configuring prometheus to add the node to the `instance` hostname\n\n## How will this PR impact users?\n* Common monitoring setups will no longer break opencost (t", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Given a common cAdvisor setup the instance label will refer to the node by IP rather than hostname\nThis uses the `node` label instead, relying on a setup where that label is properly configured rather" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nrelabel_configs:\r\n- action: labelmap\r\n regex: __meta_kubernetes_node_name\r\n replacement: node\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kubecost/kubecost/pull/2893. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "## What does this PR change?\nThis is a companion to https://github.com/opencost/opencost/pull/2423\n\n## Does this PR rely on any other PRs?\nWithout the changes from [opencost/2423](https://github.com/opencost/opencost/pull/2423) this _will_ break queries in kubecost.\n\n## How does this PR impact users? (This is the kind of thing that goes in release notes!)\nIf you've setup your own `relabel_configs` you need to add the following configuration manually:\n```yaml\n - action: labelmap\n regex: __meta_kubernetes_node_name\n replacement: node\n```\n\n## Links to Issues or tickets this PR addresses or fixes\n\n## What risks are associated with merging this PR? What is required to fully test this PR?\nWithout the changes from the linked PR some queries will stop working, this should happen immediately and get caught by tests\n\n## How was this PR tested?\n\n## Have you made an update to documentation? 
If so, please provide the corresponding PR.", + "codeSnippets": [ + "relabel_configs:\r\n- action: labelmap\r\n regex: __meta_kubernetes_node_name\r\n replacement: node", + "relabel_configs:\r\n- action: labelmap\r\n regex: __meta_kubernetes_node_name\r\n replacement: node", + "- action: labelmap\r\n regex: __meta_kubernetes_node_name\r\n replacement: node", + "- target_label: __address__\r\n replacement: kubernetes.default.svc:443" + ] + } + }, + "metadata": { + "tags": [ + "opencost", + "incubating", + "observability", + "troubleshoot", + "p2", + "kubecost", + "e2", + "needs-follow-up", + "jiracreated", + "1-115" + ], + "cncfProjects": [ + "opencost" + ], + "targetResourceKinds": [ + "Service", + "Node" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/opencost/opencost/pull/2423", + "repo": "https://github.com/opencost/opencost", + "pr": "https://github.com/kubecost/kubecost/pull/2893" + }, + "reactions": 5, + "comments": 30, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with opencost installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:37:40.875Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/opentelemetry-operator/opentelemetry-operator-689-choose-target-container-injection-with-annotation.json b/solutions/cncf-generated/opentelemetry-operator/opentelemetry-operator-689-choose-target-container-injection-with-annotation.json new file mode 100644 index 00000000..59c12dee --- /dev/null +++ b/solutions/cncf-generated/opentelemetry-operator/opentelemetry-operator-689-choose-target-container-injection-with-annotation.json @@ -0,0 +1,78 @@ +{ + "version": "kc-mission-v1", + "name": "opentelemetry-operator-689-choose-target-container-injection-with-annotation", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "opentelemetry-operator: Choose target container injection with annotation", + "description": "This PR address the same problem of the following PR (https://github.com/open-telemetry/opentelemetry-operator/pull/683) without introducing big changes in codebase.\nI just introduce a new annotation near existing auto-inject instrumenation that allow to search first for a corresponding containername before fallback to the first one by default : \n \n\n```\ninstrumentation.opentelemetry.io/container-name: autodeploy\ninstrumentation.opentelemetry.io/inject-java: \"true\"\n```\n\nThis approach present th", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This PR address the same problem of the following PR (https://github.com/open-telemetry/opentelemetry-operator/pull/683) without introducing big changes in codebase.\nI just introduce a new annotation " + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\ninstrumentation.opentelemetry.io/container-name: autodeploy\r\ninstrumentation.opentelemetry.io/inject-java: \"true\"\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/open-telemetry/opentelemetry-operator/pull/683. Review the changes to understand the solution." 
+ }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "NOTE: Due to the changes in injection behaviour I have only included the javaagent changes for early review. If the java changes are greenlit, I will add python and nodejs accordingly\n\nCurrently, it is only possible to inject a single autoinstrumentation\nper pod and it will always be injected into the first container and\nfirst container only. This is especially troublesome if a mutating\nwebhook will be injecting additional sidecars (e.g. Istio) which will\noften receive the instrumentation instead of the main container.\nFurthermore, for multi-container, multi-language pods only one container\ncan currently be auto-instrumented.\n\nThis commit introduces a new set of annotations of the form\n`instrumentation.opentelemetry.io/inject--container-names`,\nwhich tells the operator which containers should receive which language\ninstrumentation. The value of the annotation should be a comma-delimited\nlist of container-names to inject into. If the annotation is not\npresent, injection will use the default container-selection (currently\nfirst container). The annotation will be ignored if the corresponding\nlanguage annotation `instrumentation.opentelemetry.io/inject-`\nis not present or set to enable instrumentation for ``.\n\nNote: It is now possible that instrumentation for multiple languages\nneeds to be prepared for the same pod. To avoid clashes, the volumes,\nvolume mount paths and initContainers for each language have been\npostfixed with", + "codeSnippets": [ + "instrumentation.opentelemetry.io/container-name: autodeploy\r\ninstrumentation.opentelemetry.io/inject-java: \"true\"", + "instrumentation.opentelemetry.io/container-name: autodeploy\r\ninstrumentation.opentelemetry.io/inject-java: \"true\"", + "instrumentation.opentelemetry.io/inject-java-container-names = \"spring-petclinic\"" + ] + } + }, + "metadata": { + "tags": [ + "opentelemetry-operator", + "incubating", + "observability", + "troubleshoot" + ], + "cncfProjects": [ + "opentelemetry-operator" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/open-telemetry/opentelemetry-operator/pull/689", + "repo": "https://github.com/open-telemetry/opentelemetry-operator", + "pr": "https://github.com/open-telemetry/opentelemetry-operator/pull/683" + }, + "reactions": 6, + "comments": 28, + "synthesizedBy": "regex", + "qualityScore": 60 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with opentelemetry-operator installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:38:30.118Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/opentelemetry-operator/opentelemetry-operator-976-support-dotnet-auto-instrumentation.json b/solutions/cncf-generated/opentelemetry-operator/opentelemetry-operator-976-support-dotnet-auto-instrumentation.json new file mode 100644 index 00000000..76f17478 --- /dev/null +++ b/solutions/cncf-generated/opentelemetry-operator/opentelemetry-operator-976-support-dotnet-auto-instrumentation.json @@ -0,0 +1,85 @@ +{ + "version": "kc-mission-v1", + "name": "opentelemetry-operator-976-support-dotnet-auto-instrumentation", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "opentelemetry-operator: Support DotNet auto-instrumentation", + "description": "This PR adds a support for auto-instrumentation of the .Net applications. \nCurrently, dot net auto-instrumentation image is using the release [opentelemetry-dotnet-instrumentation v0.2.0-beta.1](https://github.com/open-telemetry/opentelemetry-dotnet-instrumentation/releases/tag/v0.2.0-beta.1).\n\nFollowing environment variables are injected to the application container to enable the auto-instrumentation.\n\n```\nDOTNET_ADDITIONAL_DEPS=%InstallationLocation%/AdditionalDeps\nDOTNET_SHARED_STORE=%Instal", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This PR adds a support for auto-instrumentation of the .Net applications. \nCurrently, dot net auto-instrumentation image is using the release [opentelemetry-dotnet-instrumentation v0.2.0-beta.1](http" + }, + { + "title": "Added Dockerfile for building dotnet auto-instrumentation image.", + "description": "Added Dockerfile for building dotnet auto-instrumentation image." + }, + { + "title": "Added Github action for building and pushing the dotnet auto-instrumentation ...", + "description": "Added Github action for building and pushing the dotnet auto-instrumentation image." + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nDOTNET_ADDITIONAL_DEPS=%InstallationLocation%/AdditionalDeps\r\nDOTNET_SHARED_STORE=%InstallationLocation%/store\r\nDOTNET_STARTUP_HOOKS=%InstallationLocation%/netcoreapp3.1/OpenTelemetry.AutoInstrumentation.StartupHook.dll\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/open-telemetry/opentelemetry-operator/pull/989. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "This PR is related with the PR [#976 ](https://github.com/open-telemetry/opentelemetry-operator/pull/976).\nThis PR should be merged before [#976 ](https://github.com/open-telemetry/opentelemetry-operator/pull/976)\n\n1. Added Dockerfile for building dotnet auto-instrumentation image.\n2. 
Added Github action for building and pushing the dotnet auto-instrumentation image.", + "codeSnippets": [ + "DOTNET_ADDITIONAL_DEPS=%InstallationLocation%/AdditionalDeps\r\nDOTNET_SHARED_STORE=%InstallationLocation%/store\r\nDOTNET_STARTUP_HOOKS=%InstallationLocation%/netcoreapp3.1/OpenTelemetry.AutoInstrumentation.StartupHook.dll", + "DOTNET_ADDITIONAL_DEPS=%InstallationLocation%/AdditionalDeps\r\nDOTNET_SHARED_STORE=%InstallationLocation%/store\r\nDOTNET_STARTUP_HOOKS=%InstallationLocation%/netcoreapp3.1/OpenTelemetry.AutoInstrumentation.StartupHook.dll" + ] + } + }, + "metadata": { + "tags": [ + "opentelemetry-operator", + "incubating", + "observability", + "troubleshoot" + ], + "cncfProjects": [ + "opentelemetry-operator" + ], + "targetResourceKinds": [], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/open-telemetry/opentelemetry-operator/pull/976", + "repo": "https://github.com/open-telemetry/opentelemetry-operator", + "pr": "https://github.com/open-telemetry/opentelemetry-operator/pull/989" + }, + "reactions": 5, + "comments": 5, + "synthesizedBy": "regex", + "qualityScore": 66 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with opentelemetry-operator installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:38:35.700Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/podman-container-tools/podman-container-tools-13490-pod-logs-enhancements-option-to-color-logs.json b/solutions/cncf-generated/podman-container-tools/podman-container-tools-13490-pod-logs-enhancements-option-to-color-logs.json new file mode 100644 index 00000000..66677834 --- /dev/null +++ b/solutions/cncf-generated/podman-container-tools/podman-container-tools-13490-pod-logs-enhancements-option-to-color-logs.json @@ -0,0 +1,80 @@ +{ + "version": "kc-mission-v1", + "name": "podman-container-tools-13490-pod-logs-enhancements-option-to-color-logs", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "podman-container-tools: pod logs enhancements: option to color logs", + "description": "Created an option to colourise ```pod logs``` with an option ```--color```. You can recreate with the following steps:\n\n1. Create a pod with containers\n```bash\nbin/podman pod create --name=pod_testlogs; bin/podman run --name=testlogs_loop1_1 -d --pod=pod_testlogs busybox /bin/sh -c 'for i in `seq 1 10000`; do echo \"loop1: $i\"; sleep 1; done'; bin/podman run --name=testlogs_loop2_1 -d --pod=pod_testlogs busybox /bin/sh -c 'for i in `seq 1 10000`; do echo \"loop2: $i\"; sleep 3; done';\n```\n\n2. Logs", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Created an option to colourise ```pod logs``` with an option ```--color```. You can recreate with the following steps:\n\n1. 
Create a pod with containers\n```bash\nbin/podman pod create --name=pod_testlo" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nbin/podman pod create --name=pod_testlogs; bin/podman run --name=testlogs_loop1_1 -d --pod=pod_testlogs busybox /bin/sh -c 'for i in `seq 1 10000`; do echo \"loop1: $i\"; sleep 1; done'; bin/podman run --name=testlogs_loop2_1 -d --pod=pod_testlogs busybox /bin/sh -c 'for i in `seq 1 10000`; do echo \"loop2: $i\"; sleep 3; done';\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "You need to do something like\n\ngit pull origin main\ngit rebase -i origin\nRemove all of the code in the rebase that you did not add and squash all of your commits into a single PR\nthen do a \ngit push --force", + "codeSnippets": [ + "bin/podman pod create --name=pod_testlogs; bin/podman run --name=testlogs_loop1_1 -d --pod=pod_testlogs busybox /bin/sh -c 'for i in `seq 1 10000`; do echo \"loop1: $i\"; sleep 1; done'; bin/podman run --name=testlogs_loop2_1 -d --pod=pod_testlogs busybox /bin/sh -c 'for i in `seq 1 10000`; do echo \"loop2: $i\"; sleep 3; done';", + "bin/podman pod create --name=pod_testlogs; bin/podman run --name=testlogs_loop1_1 -d --pod=pod_testlogs busybox /bin/sh -c 'for i in `seq 1 10000`; do echo \"loop1: $i\"; sleep 1; done'; bin/podman run --name=testlogs_loop2_1 -d --pod=pod_testlogs busybox /bin/sh -c 'for i in `seq 1 10000`; do echo \"loop2: $i\"; sleep 3; done';", + "bin/podman pod logs --tail=10 -f --color pod_testlogs", + "bin/podman kill --all; bin/podman rm --all; bin/podman pod rm --all", + "time=\"2022-03-28T20:30:41+02:00\" level=warning msg=\"\\\"/\\\" is not a shared mount, this could cause issues or missing mounts with rootless containers\"\r\nCapBnd: 00000000a80425fb\r\nCapEff: 0000000000000000\r\nCapInh: 0000000000000000\r\nCapBnd: 00000000a80425fb\r\nCapEff: 00000000a80425fb\r\nCapInh: 00000000a80425fb\r\nCapBnd: 00000000a80425fb\r\nCapEff: 00000000a80425fb\r\nCapAmb: 0000000000000002\r\nCapInh: 0000000000000002\r\nCapAmb: 0000000000000000\r\nCapAmb: 0000000000000000\r\nCapInh: 00000000a80425fb\r\nCapAmb: 0000000000000002\r\nCapInh: 0000000000000000\r\n\r\n------------------------------\r\n• Failure [17.948 seconds]\r\nPodman run\r\n/usr/local/go/src/github.com/containers/podman/test/e2e/run_test.go:25\r\n podman run user capabilities test [It]\r\n /usr/local/go/src/github.com/containers/podman/test/e2e/run_test.go:467\r\n\r\n Expected\r\n : CapInh: 0000000000000000\r\n to contain substring\r\n : 0000000000000002\r\n\r\n /usr/local/go/src/github.com/containers/podman/test/e2e/run_test.go:553\r\n------------------------------\r\nSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSintegration timing results\r\npodman run user capabilities test 17.947786\r\n\"unlinkat /tmp/podman/imagecachedir/vfs/dir/2fa52ae883433bc788d7fa1737ca50d2802e0e39d71cae30b4c49378c1207a17/home: permission denied\"\r\n\r\n\r\nSummarizing 1 Failure:\r\n\r\n[Fail] Podman run [It] podman run user capabilities test \r\n/usr/local/go/src/github.com/containers/podman/test/e2e/run_test.go:553\r\n\r\nRan 1 of 114 Specs in 41.103 seconds\r\nFAIL! 
-- 0 Passed | 1 Failed | 0 Pending | 113 Skipped\r\n--- FAIL: TestLibpod (41.12s)\r\nFAIL\r\nFAIL command-line-arguments 41.131s\r\nFAIL" + ] + } + }, + "metadata": { + "tags": [ + "podman-container-tools", + "sandbox", + "app-definition", + "troubleshoot", + "lgtm", + "approved", + "locked---please-file-new-issue-pr" + ], + "cncfProjects": [ + "podman-container-tools" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/containers/podman/pull/13490", + "repo": "https://github.com/containers/podman" + }, + "reactions": 2, + "comments": 14, + "synthesizedBy": "regex", + "qualityScore": 62 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with podman-container-tools installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:42:46.510Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/podman-container-tools/podman-container-tools-4215-add-squash-all-fix-squash-option-in-build.json b/solutions/cncf-generated/podman-container-tools/podman-container-tools-4215-add-squash-all-fix-squash-option-in-build.json new file mode 100644 index 00000000..93bc322d --- /dev/null +++ b/solutions/cncf-generated/podman-container-tools/podman-container-tools-4215-add-squash-all-fix-squash-option-in-build.json @@ -0,0 +1,82 @@ +{ + "version": "kc-mission-v1", + "name": "podman-container-tools-4215-add-squash-all-fix-squash-option-in-build", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "podman-container-tools: Add squash-all, fix squash option in build", + "description": "Translate the podman build --squash command to podman build --layers=false which\nhas the same functionality as docker build --squash. Add a new option --squash-all\nwhich will squash all layers into one. This will be translated to buildah bud --squash\nfor the buildah bud api.\n\nAlso allow only one option, squash, layers or squash--all to be used per build command.\n\nFixes: https://github.com/containers/buildah/issues/1234", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Translate the podman build --squash command to podman build --layers=false which\nhas the same functionality as docker build --squash. Add a new option --squash-all\nwhich will squash all layers into on" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n# touch alpinetest.tgz # or better yet create a tgz file with a few files in it.\r\n\r\n# cat ~/Dockerfile.squash-a\r\nFROM busybox:latest\r\nADD alpinetest.tgz /data\r\n\r\n# cat ~/Dockerfile.squash-b\r\nFROM test-squash-a:latest\r\nRUN rm -rf /data\r\n\r\n# cat ~/Dockerfile.squash-c\r\nFROM busybox:latest\r\nADD alpinetest.tgz /data\r\nRUN rm -rf /data\r\n\r\n# podman build -f ~/Dockerfile.squash-a -t test-squash-a:latest .\r\nSTEP 1: FROM busybox:latest\r\nSTEP 2: ADD alpinetest.tgz /data\r\n--> Using cache c54637689450d9516e6f\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/containers/buildah/pull/1900. Review the changes to understand the solution." 
+ }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "Add a `--squash-all` flag that will be unique to `podman build` and will not\nbe used by Buildah. `buildah bud --layers=false` is equivalent to `docker build --squash` (experimental). When a podman build command includes the `--squash` option, podman will translate that to `--layers=false` for the buildah bud api code. A `docker build` call is equivalent to `podman build` or `podman build --layers=true` and that's not changing.\n\nWhen podman build uses the `--squash-all` command, it will turn on the --squash option for the buildah bud api to have only one layer returned by the buildah bud code. There's not a Docker equivalent for this at the moment.\n\nI'll hopefully be spinning up a Podman PR in the next day or two that will use this new option.", + "codeSnippets": [ + "# touch alpinetest.tgz # or better yet create a tgz file with a few files in it.\r\n\r\n# cat ~/Dockerfile.squash-a\r\nFROM busybox:latest\r\nADD alpinetest.tgz /data\r\n\r\n# cat ~/Dockerfile.squash-b\r\nFROM test-squash-a:latest\r\nRUN rm -rf /data\r\n\r\n# cat ~/Dockerfile.squash-c\r\nFROM busybox:latest\r\nADD alpinetest.tgz /data\r\nRUN rm -rf /data\r\n\r\n# podman build -f ~/Dockerfile.squash-a -t test-squash-a:latest .\r\nSTEP 1: FROM busybox:latest\r\nSTEP 2: ADD alpinetest.tgz /data\r\n--> Using cache c54637689450d9516e6f", + "# touch alpinetest.tgz # or better yet create a tgz file with a few files in it.\r\n\r\n# cat ~/Dockerfile.squash-a\r\nFROM busybox:latest\r\nADD alpinetest.tgz /data\r\n\r\n# cat ~/Dockerfile.squash-b\r\nFROM test-squash-a:latest\r\nRUN rm -rf /data\r\n\r\n# cat ~/Dockerfile.squash-c\r\nFROM busybox:latest\r\nADD alpinetest.tgz /data\r\nRUN rm -rf /data\r\n\r\n# podman build -f ~/Dockerfile.squash-a -t test-squash-a:latest .\r\nSTEP 1: FROM busybox:latest\r\nSTEP 2: ADD alpinetest.tgz /data\r\n--> Using cache c54637689450d9516e6f2e497bb78f663405d9b609882ec89f4b481e84395eba\r\nSTEP 3: COMMIT test-squash-a:latest\r\nc54637689450d9516e6f2e497bb78f663405d9b609882ec89f4b481e84395eba\r\n\r\n# podman build -f ~/Dockerfile.squash-b --squash -t test-squash-b:latest .\r\nSTEP 1: FROM test-squash-a:latest\r\nSTEP 2: RUN rm -rf /data\r\nSTEP 3: COMMIT test-squash-b:latest\r\nGetting image source signatures\r\nCopying blob 6c0ea40aef9d skipped: already exists\r\nCopying blob 2f2b43ce2ffb skipped: already exists\r\nCopying blob 2f27bd025155 done\r\nCopying config bb31c226ee done\r\nWriting manifest to image destination\r\nStoring signatures\r\nbb31c226ee4317cbecd41747500e58c92c2f00c13df4e591d20571974a8a9109\r\n\r\n# podman build -f ~/Dockerfile.squash-c --squash -t test-squash-c:latest .\r\nSTEP 1: FROM busybox:latest\r\nSTEP 2: ADD alpinetest.tgz /data\r\nSTEP 3: RUN rm -rf /data\r\nSTEP 4: COMMIT test-squash-c:latest\r\nGetting image source signatures\r\nCopying blob 6c0ea40aef9d skipped: already exists\r\nCopying blob cb21ed953734 done\r\nCopying config 7ccbafc877 done\r\nWriting manifest to image destination\r\nStoring signatures\r\n7ccbafc877f09c403f455918e97bca9bad7895719baf5e43177fbd555202768f\r\n\r\n# podman build -f ~/Dockerfile.squash-c --squash-all -t test-squash-d:latest .\r\nSTEP 1: FROM busybox:latest\r\nSTEP 2: ADD alpinetest.tgz /data\r\nSTEP 3: RUN rm -rf /data\r\nSTEP 4: COMMIT test-squash-d:latest\r\nGetting image source signatures\r\nCopying blob 2e9938a66ae2 done\r\nCopying config 968b309097 
done\r\nWriting manifest to image destination\r\nStoring signatures\r\n968b3090976361347cd9e40ee7e8d98c1cba9692524c94abce6d7f3101463c5a\r\n\r\n# podman inspect --format \"{{.RootFS.Layers}}\" test-squash-a\r\n[sha256:6c0ea40aef9d2795f922f4e8642f0cd9ffb9404e6f3214693a1fd45489f38b44 sha256:2f2b43ce2ffb410197ef7c43d96e132a1d12a24519f1fc0eb3598afd9c912ccd]\r\n\r\n# podman inspect --format \"{{.RootFS.Layers}}\" test-squash-b\r\n[sha256:6c0ea40aef9d2795f922f4e8642f0cd9ffb9404e6f3214693a1fd45489f38b44 sha256:2f2b43ce2ffb410197ef7c43d96e132a1d12a24519f1fc0eb3598afd9c912ccd sha256:5a47f575b5d22f740a0689eb1e72ba71fb1491e22a5740bef87023c64c7605e6]\r\n\r\n# podman inspect --format \"{{.RootFS.Layers}}\" test-squash-c\r\n[sha256:6c0ea40aef9d2795f922f4e8642f0cd9ffb9404e6f3214693a1fd45489f38b44 sha256:46f19664b40a2e34c2cab5d6ead1fefcba940c4bbda55ae742206cef82edebbd]\r\n\r\n# podman inspect --format \"{{.RootFS.Layers}}\" test-squash-d\r\n[sha256:c84a24cbfc61792badfacf0d6831d3ec5fa406f0a93cb0ad7b6e9b15bbb6ed54]" + ] + } + }, + "metadata": { + "tags": [ + "podman-container-tools", + "sandbox", + "app-definition", + "troubleshoot", + "lgtm", + "approved", + "locked---please-file-new-issue-pr" + ], + "cncfProjects": [ + "podman-container-tools" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/containers/podman/pull/4215", + "repo": "https://github.com/containers/podman", + "pr": "https://github.com/containers/buildah/pull/1900" + }, + "reactions": 2, + "comments": 10, + "synthesizedBy": "regex", + "qualityScore": 60 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with podman-container-tools installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:42:42.607Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/podman-container-tools/podman-container-tools-6442-shell-completion.json b/solutions/cncf-generated/podman-container-tools/podman-container-tools-6442-shell-completion.json new file mode 100644 index 00000000..7e28f7ed --- /dev/null +++ b/solutions/cncf-generated/podman-container-tools/podman-container-tools-6442-shell-completion.json @@ -0,0 +1,81 @@ +{ + "version": "kc-mission-v1", + "name": "podman-container-tools-6442-shell-completion", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "podman-container-tools: Shell completion", + "description": "Add shell completion with cobra\n \nAllow automatic generation for shell completion scripts\nwith the internal cobra functions (requires v1.0.0)\n \nThis should replace the handwritten completion scripts\nand even adds support for fish and powershell(if needed?)\n\nWe can now create the scripts with\n- podman completion bash\n- podman completion zsh\n- podman completion fish\n- podman completion powershell\n\nto test the completion run:\n`source <(podman completion bash)`\n\nI added the main completion for all ", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Add shell completion with cobra\n \nAllow automatic generation for shell completion scripts\nwith the internal cobra functions (requires v1.0.0)\n \nThis should replace the handwritten completion scripts\n" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n[UPDATED TO ADD: bash-5.0.11-2.fc32]\nyou have to use `./bin/podman p` because it implements some custom hidden args on the podman command\nI did (try both `podman` and `./bin/podman`); I redacted one just for brevity. No difference. \nok i installed it with `sudo make install` i think the new podman version needs to be in your $PATH\nSolved: you need to run this first:\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "zsh: I suspect this is a bug in cobra itself, not your code: any flag whose default is empty causes a zsh completion error:\n```console\nzsh$ podman volume create\n_arguments:comparguments:325: invalid option definition: (*-l *--label)*-l[Set metadata for a volume (default [])]:\n\nzsh$ podman completion zsh | grep -3 'Set metadata for a volume'\nfunction _podman_volume_create {\n _arguments \\\n '--driver[Specify volume driver name]:' \\\n '(*-l *--label)'{\\*-l,\\*--label}'[Set metadata for a volume (default [])]:' \\\n '(*-o *--opt)'{\\*-o,\\*--opt}'[Set driver specific options (default [])]:' \\\n '--cgroup-manager[Cgroup manager to use (\"cgroupfs\"|\"systemd\")]:' \\\n '--cni-config-dir[Path of the configuration directory for CNI networks]:' \\\n```\nThere are 35 instances of this. One easy solution is to `sed -e 's/\\[\\]/none/'` or `s/\\(\\[\\|\\]\\)/\\\\\\1/g`", + "codeSnippets": [ + "[UPDATED TO ADD: bash-5.0.11-2.fc32]\nyou have to use `./bin/podman p` because it implements some custom hidden args on the podman command\nI did (try both `podman` and `./bin/podman`); I redacted one just for brevity. No difference. 
\nok i installed it with `sudo make install` i think the new podman version needs to be in your $PATH\nSolved: you need to run this first:", + "[UPDATED TO ADD: bash-5.0.11-2.fc32]\nyou have to use `./bin/podman p` because it implements some custom hidden args on the podman command\nI did (try both `podman` and `./bin/podman`); I redacted one just for brevity. No difference. \nok i installed it with `sudo make install` i think the new podman version needs to be in your $PATH\nSolved: you need to run this first:", + "Can you find a way to disable the showing of global flags on subcommands? It is the opposite of useful to see 15+ suggestions on a command that has only two flags:", + "There is a weird inconsistency in the autogenerated flag completions, and I can't figure out where it's happening:", + "Note that the first one has an `=` suffix, the second does not. This produces duplicate completions, as seen in my earlier comment above (`--something` and `--something=`). I think it makes most sense to remove the `=` but as long as you stick with one format I'm OK with it.\r\n\r\nAlso, is there a way to add option completion to flags? E.g. `--cgroup-manager` currently uses file completion, it should offer only two choices.\r\n\r\nAnd hey, I know I'm griping a lot, but this is really great work. I really like it and think it's much more maintainable than the current bash completions. Thank you for your work on this.\nI think thats because these are persistent flags and can therefore used on any subcommand\r\n`podman rmi --log-level info imagename` is completely valid\nIt may be valid but it's completely useless, and no human will ever want to do that. The possibility is there due to an accident of history. If there is any possible way to remove those from completion, I encourage you to do so.\n> There is a weird inconsistency in the autogenerated flag completions, and I can't figure out where it's happening:\r\n> \r\n>" + ] + } + }, + "metadata": { + "tags": [ + "podman-container-tools", + "sandbox", + "app-definition", + "troubleshoot", + "lgtm", + "approved", + "ok-to-test", + "locked---please-file-new-issue-pr" + ], + "cncfProjects": [ + "podman-container-tools" + ], + "targetResourceKinds": [ + "Pod" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/containers/podman/pull/6442", + "repo": "https://github.com/containers/podman" + }, + "reactions": 7, + "comments": 59, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with podman-container-tools installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:42:32.736Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/podman-container-tools/podman-container-tools-9423-rootless-cni-without-infra-container.json b/solutions/cncf-generated/podman-container-tools/podman-container-tools-9423-rootless-cni-without-infra-container.json new file mode 100644 index 00000000..10f2e220 --- /dev/null +++ b/solutions/cncf-generated/podman-container-tools/podman-container-tools-9423-rootless-cni-without-infra-container.json @@ -0,0 +1,85 @@ +{ + "version": "kc-mission-v1", + "name": "podman-container-tools-9423-rootless-cni-without-infra-container", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "podman-container-tools: rootless cni without infra container", + "description": "Instead of creating an extra container create a network and mount\nnamespace inside the podman user namespace. This ns is used to\nfor rootless cni operations.\nThis helps to align the rootless and rootful network code path.\nIf we run as rootless we just have to set up a extra net ns and\ninitialize slirp4netns in it. The ocicni lib will be called in\nthat net ns.\n\nThis design allows allows easier maintenance, no extra container\nwith pause processes, support for rootless cni with --uidmap\nand possibl", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Instead of creating an extra container create a network and mount\nnamespace inside the podman user namespace. This ns is used to\nfor rootless cni operations.\nThis helps to align the rootless and rootf" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nI'm seeing this (Build failed, gosrc something) in other PRs. I'm guessing Cirrus is down or having problems.\n@edsantiago has it, that should work fine (and looks like it does) for `.cirrus.yml`. However, the compose tests don't go through `dotest()` function in `contrib/cirrus/runner.sh`. So `compose test on fedora-33 (rootless)` is in name only, we also need to teach the script how to actually run as the user...\nOkay, I think this should work (and support running other tests rootless in the f\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/containers/podman/pull/8910. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "As proposed by Akihiro Suda make the rootless-cni-infra container use\nthe host rootfs instead of an image. This works by mounting the host\nrootfs in the user namespace to `$runroot/rootless-cni-infra`\nand use this as rootfs for the container.\n\nSecond, rewrite the rootless-cni-infra shell script in go to remove the\nextra cnitool dependency which is not packaged anywhere. With that we\nonly need the same dependencies as rootful podman which should be\nalready installed.\n\nAdvantages:\n- Works for all architectures podman supports.\n- Works without internet connection.\n- No extra maintainence of an extra image.\n\nDisadvantages:\n- Requires the dependencies to be available on the host (e.g. dnsname\nplugin). 
The user may not have control over those.\n\nProblems:\n- It doesn't unmount the rootfs if the the rootless-cni-infra container\nis stopped directly.\n\nAlso the image version did not respect the `--cni-config-dir` option\nproperly. It mounted the cni config dir only at container create time\nbut this option can be used on podman run commands which did not\nworked if the rootless-cni-infra container was already running.\nThis is only possible with the rootfs version.\n\nLive upgrading is possible. If the old infra container is still\nrunning podman talks via the old api to the script. Once the\nold infra container is deleted the new imageless infra container\nwill be created and podman can talk via the new api. A version\nlabel is added to the container to distinguish between old and new.", + "codeSnippets": [ + "I'm seeing this (Build failed, gosrc something) in other PRs. I'm guessing Cirrus is down or having problems.\n@edsantiago has it, that should work fine (and looks like it does) for `.cirrus.yml`. However, the compose tests don't go through `dotest()` function in `contrib/cirrus/runner.sh`. So `compose test on fedora-33 (rootless)` is in name only, we also need to teach the script how to actually run as the user...\nOkay, I think this should work (and support running other tests rootless in the f", + "I'm seeing this (Build failed, gosrc something) in other PRs. I'm guessing Cirrus is down or having problems.\n@edsantiago has it, that should work fine (and looks like it does) for `.cirrus.yml`. However, the compose tests don't go through `dotest()` function in `contrib/cirrus/runner.sh`. So `compose test on fedora-33 (rootless)` is in name only, we also need to teach the script how to actually run as the user...\nOkay, I think this should work (and support running other tests rootless in the future, if needed):" + ] + } + }, + "metadata": { + "tags": [ + "podman-container-tools", + "sandbox", + "app-definition", + "troubleshoot", + "rootless", + "lgtm", + "approved", + "cni", + "locked---please-file-new-issue-pr" + ], + "cncfProjects": [ + "podman-container-tools" + ], + "targetResourceKinds": [ + "Pod", + "Namespace" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/containers/podman/pull/9423", + "repo": "https://github.com/containers/podman", + "pr": "https://github.com/containers/podman/pull/8910" + }, + "reactions": 4, + "comments": 10, + "synthesizedBy": "regex", + "qualityScore": 64 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with podman-container-tools installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:42:35.773Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/prometheus/prometheus-5031-docker-images-for-arm32v7-and-arm64v8.json b/solutions/cncf-generated/prometheus/prometheus-5031-docker-images-for-arm32v7-and-arm64v8.json new file mode 100644 index 00000000..90c92b68 --- /dev/null +++ b/solutions/cncf-generated/prometheus/prometheus-5031-docker-images-for-arm32v7-and-arm64v8.json @@ -0,0 +1,80 @@ +{ + "version": "kc-mission-v1", + "name": "prometheus-5031-docker-images-for-arm32v7-and-arm64v8", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "prometheus: Docker images for ARM32v7 and ARM64v8", + "description": "Build and push docker images for linux arm32v7 and arm64v8.\n\nFixes https://github.com/prometheus/promu/issues/89\n\n**Building**\nThe ARM images are building using `binfmt_misc`, which is the same mechanism also used by Docker for Mac to support building ARM images, some details are here https://www.ecliptik.com/Cross-Building-and-Running-Multi-Arch-Docker-Images/\n\nWhile the circle docker executor doesn't natively support building ARM images, the machine executor has almost all the necessary depend", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Build and push docker images for linux arm32v7 and arm64v8.\n\nFixes https://github.com/prometheus/promu/issues/89\n\n**Building**\nThe ARM images are building using `binfmt_misc`, which is the same mechan" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\npromu crossbuild -v -p \"linux/amd64 linux/armv7 linux/arm64\"\r\nmake docker DOCKER_REPO=johanneswuerbach\r\nmake docker-publish DOCKER_REPO=johanneswuerbach\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/prometheus/busybox/pull/19. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "Base images to support building and publishing of prometheus components\nfor ARM32v7 and ARM64v8\n\nRequired for https://github.com/prometheus/prometheus/pull/5031", + "codeSnippets": [ + "promu crossbuild -v -p \"linux/amd64 linux/armv7 linux/arm64\"\r\nmake docker DOCKER_REPO=johanneswuerbach\r\nmake docker-publish DOCKER_REPO=johanneswuerbach", + "promu crossbuild -v -p \"linux/amd64 linux/armv7 linux/arm64\"\r\nmake docker DOCKER_REPO=johanneswuerbach\r\nmake docker-publish DOCKER_REPO=johanneswuerbach", + "$ docker pull johanneswuerbach/prometheus:docker-arm && docker run --rm -it johanneswuerbach/prometheus:docker-arm --version\r\ndocker-arm: Pulling from johanneswuerbach/prometheus\r\nDigest: sha256:c8212d84d2d79f32d9b824d9235d3b4ab178e21c3a762a643c8abe12145a2d22\r\nStatus: Image is up to date for johanneswuerbach/prometheus:docker-arm\r\nprometheus, version 2.6.0 (branch: master, revision: 2e725a195a17155dfd1e172a1e1205fc7d6986ec)\r\n build user: root@682d9280fbbf\r\n build date: 20181221-21:56:26\r\n go version: go1.11.4", + "$ docker pull johanneswuerbach/prometheus:docker-arm && docker run --rm -it johanneswuerbach/prometheus:docker-arm --version\r\ndocker-arm: Pulling from johanneswuerbach/prometheus\r\nDigest: sha256:c8212d84d2d79f32d9b824d9235d3b4ab178e21c3a762a643c8abe12145a2d22\r\nStatus: Image is up to date for johanneswuerbach/prometheus:docker-arm\r\nprometheus, version 2.6.0 (branch: master, revision: 2e725a195a17155dfd1e172a1e1205fc7d6986ec)\r\n build user: root@682d9280fbbf\r\n build date: 20181221-22:07:33\r\n go version: go1.11.4", + "$ docker pull prom/prometheus && docker run --rm -it prom/prometheus --version\r\nUsing default tag: latest\r\nlatest: Pulling from prom/prometheus\r\nDigest: sha256:1ffbf5d3c6476384905e8f57c98ac0611f328af68bedb909ec3f350d7e18b134\r\nStatus: Image is up to date for prom/prometheus:latest\r\nstandard_init_linux.go:190: exec user process caused \"exec format error\"" + ] + } + }, + "metadata": { + "tags": [ + "prometheus", + "graduated", + "observability", + "troubleshoot" + ], + "cncfProjects": [ + "prometheus" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/prometheus/prometheus/pull/5031", + "repo": "https://github.com/prometheus/prometheus", + "pr": "https://github.com/prometheus/busybox/pull/19" + }, + "reactions": 10, + "comments": 51, + "synthesizedBy": "regex", + "qualityScore": 64 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with prometheus installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:36:17.820Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/prometheus/prometheus-7420-docker-swarm-service-discovery.json b/solutions/cncf-generated/prometheus/prometheus-7420-docker-swarm-service-discovery.json new file mode 100644 index 00000000..d4244301 --- /dev/null +++ b/solutions/cncf-generated/prometheus/prometheus-7420-docker-swarm-service-discovery.json @@ -0,0 +1,75 @@ +{ + "version": "kc-mission-v1", + "name": "prometheus-7420-docker-swarm-service-discovery", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "prometheus: Docker Swarm service discovery", + "description": "This is a docker swarm implementation without business logic.", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This is a docker swarm implementation without business logic." + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nprometheus:\r\n image: prom/prometheus\r\n networks:\r\n - monitor\r\n ports:\r\n - \"9090:9090\"\r\n command:\r\n - '--config.file=/etc/prometheus/prometheus.yml'\r\n - '--storage.tsdb.path=/prometheus'\r\n - '--storage.tsdb.retention=${PROMETHEUS_RETENTION:-24h}'\r\n volumes:\r\n - prometheus:/prometheus\r\n - /home/efs/devops/dsm:/etc/prometheus:ro\r\n - /var/run/docker.sock:/var/run/docker.sock:ro\r\n deploy:\r\n mode: replicated\r\n replicas: 1\r\n resou\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "> Docker (Swarm) allows you to access the docker.sock; I was under the impression that the 'host' parameter (of prometheus) only accepts a address. Are you saying that one can also use file:// as a value for host?\n> \n> 2375 is by default not exposed, nor is 2376. Since 19.03 this is also considered malpractice, as docker can be managed through SSH (DOCKER_HOST=ssh://manager-node.domain) - which means you no longer need to expose the socket insecurely (2375) or setup a separate PKI (2376) but rather securely manage your Swarm using your existing PKI (SSH). Effectively the Docker client establishes an SSH session to the ${DOCKER_HOST} and communicates with the Unix socket (/var/run/docker.sock) of that host.\n\nCan you comment on the issue #7603? 
thanks", + "codeSnippets": [ + "prometheus:\r\n image: prom/prometheus\r\n networks:\r\n - monitor\r\n ports:\r\n - \"9090:9090\"\r\n command:\r\n - '--config.file=/etc/prometheus/prometheus.yml'\r\n - '--storage.tsdb.path=/prometheus'\r\n - '--storage.tsdb.retention=${PROMETHEUS_RETENTION:-24h}'\r\n volumes:\r\n - prometheus:/prometheus\r\n - /home/efs/devops/dsm:/etc/prometheus:ro\r\n - /var/run/docker.sock:/var/run/docker.sock:ro\r\n deploy:\r\n mode: replicated\r\n replicas: 1\r\n resou", + "prometheus:\r\n image: prom/prometheus\r\n networks:\r\n - monitor\r\n ports:\r\n - \"9090:9090\"\r\n command:\r\n - '--config.file=/etc/prometheus/prometheus.yml'\r\n - '--storage.tsdb.path=/prometheus'\r\n - '--storage.tsdb.retention=${PROMETHEUS_RETENTION:-24h}'\r\n volumes:\r\n - prometheus:/prometheus\r\n - /home/efs/devops/dsm:/etc/prometheus:ro\r\n - /var/run/docker.sock:/var/run/docker.sock:ro\r\n deploy:\r\n mode: replicated\r\n replicas: 1\r\n resources:\r\n limits:\r\n memory: 1024M\r\n reservations:\r\n memory: 128M", + "scrape_configs:\r\n - job_name: 'docker'\r\n dockerswarm_sd_configs:\r\n - host: unix:///var/run/docker.sock\r\n role: nodes" + ] + } + }, + "metadata": { + "tags": [ + "prometheus", + "graduated", + "observability", + "troubleshoot" + ], + "cncfProjects": [ + "prometheus" + ], + "targetResourceKinds": [ + "Service" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/prometheus/prometheus/pull/7420", + "repo": "https://github.com/prometheus/prometheus" + }, + "reactions": 5, + "comments": 20, + "synthesizedBy": "regex", + "qualityScore": 67 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with prometheus installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:36:23.584Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/rook/rook-3128-ceph-add-metrics-for-flexvolume-driver.json b/solutions/cncf-generated/rook/rook-3128-ceph-add-metrics-for-flexvolume-driver.json new file mode 100644 index 00000000..5d453103 --- /dev/null +++ b/solutions/cncf-generated/rook/rook-3128-ceph-add-metrics-for-flexvolume-driver.json @@ -0,0 +1,80 @@ +{ + "version": "kc-mission-v1", + "name": "rook-3128-ceph-add-metrics-for-flexvolume-driver", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "rook: ceph: add metrics for flexvolume driver", + "description": "This allows to export persistent volume metrics.\n\n**Description of your changes:**\nAccording to https://github.com/kubernetes/kubernetes/issues/67400 in kubernetes 1.13 metrics support for flex volume was added.\nI wonder, is adding support metrics flag enough to fix the issue?\n\n**Which issue is resolved by this Pull Request:**\n\n**Checklist:**", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This allows to export persistent volume metrics.\n\n**Description of your changes:**\nAccording to https://github.com/kubernetes/kubernetes/issues/67400 in kubernetes 1.13 metrics support for flex volume" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\napiVersion: apps/v1\r\nkind: StatefulSet\r\nmetadata:\r\n name: test-pgsql\r\nspec:\r\n serviceName: pgsql\r\n replicas: 1\r\n selector:\r\n matchLabels:\r\n name: test-pgsql\r\n template:\r\n metadata:\r\n labels:\r\n name: test-pgsql\r\n spec:\r\n containers:\r\n - name: postgres\r\n image: postgres:10\r\n lifecycle:\r\n preStop:\r\n exec:\r\n command: [\"/etc/init.d/postgres\", \"stop\"]\r\n ports:\r\n - containerPort: 5432\r\n name: \n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/kubestellar/console-kb/pull/6. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "# CNCF Mission Generation Report (All Batches)\n\n**Date:** 2026-02-27T17:50:38Z\n**Batches:** 11\n\n---\n## Batch: generation-report-0.md\n# CNCF Mission Generation Report\n\n**Date:** 2026-02-27T17:47:59.472Z\n**Total generated:** 184\n**Skipped (duplicates):** 0\n**Errors:** 0\n\n## Projects Processed\n\n| Project | Maturity | Issues Found | Missions Generated | Errors |\n|---------|----------|-------------|-------------------|--------|\n| argo | graduated | 20 | 20 | 0 |\n| cert-manager | graduated | 17 | 17 | 0 |\n| cilium | graduated | 15 | 15 | 0 |\n| cloudevents | graduated | 3 | 3 | 0 |\n| containerd | graduated | 15 | 15 | 0 |\n| coredns | graduated | 6 | 6 | 0 |\n| cri-o | graduated | 14 | 14 | 0 |\n| crossplane | graduated | 8 | 8 | 0 |\n| cubefs | graduated | 0 | 0 | 0 |\n| dapr | graduated | 8 | 8 | 0 |\n| dragonfly | graduated | 0 | 0 | 0 |\n| envoy | graduated | 16 | 16 | 0 |\n| etcd | graduated | 12 | 12 | 0 |\n| falco | graduated | 9 | 9 | 0 |\n| fluentd | graduated | 2 | 2 | 0 |\n| flux | graduated | 0 | 0 | 0 |\n| harbor | graduated | 7 | 7 | 0 |\n| helm | graduated | 20 | 20 | 0 |\n| in-toto | graduated | 3 | 3 | 0 |\n| istio | graduated | 9 | 9 | 0 |\n\n## Generated Missions\n\n- **argo: feat: Multiple sources for applications** (advanced) — [source](https://github.com/argoproj/argo-cd/pull/10432)\n- **argo: feat: Extra Helm values from external git repo #5826** (advanced) — [source](https://github.com/argoproj/argo-cd/pull/6280)\n- **argo: feat(application-controller): Add support for rollback", + "codeSnippets": [ + "apiVersion: apps/v1\r\nkind: StatefulSet\r\nmetadata:\r\n name: test-pgsql\r\nspec:\r\n serviceName: pgsql\r\n replicas: 1\r\n selector:\r\n matchLabels:\r\n name: test-pgsql\r\n template:\r\n metadata:\r\n labels:\r\n name: test-pgsql\r\n spec:\r\n containers:\r\n - name: postgres\r\n image: postgres:10\r\n lifecycle:\r\n preStop:\r\n exec:\r\n command: [\"/etc/init.d/postgres\", \"stop\"]\r\n ports:\r\n - containerPort: 5432\r\n name:", + "apiVersion: apps/v1\r\nkind: StatefulSet\r\nmetadata:\r\n name: test-pgsql\r\nspec:\r\n serviceName: pgsql\r\n replicas: 1\r\n selector:\r\n matchLabels:\r\n name: test-pgsql\r\n template:\r\n metadata:\r\n labels:\r\n name: test-pgsql\r\n spec:\r\n containers:\r\n - name: postgres\r\n image: postgres:10\r\n lifecycle:\r\n preStop:\r\n exec:\r\n command: [\"/etc/init.d/postgres\", \"stop\"]\r\n ports:\r\n - containerPort: 5432\r\n name: pgsql\r\n protocol: TCP\r\n volumeMounts:\r\n - mountPath: /var/lib/postgresql/data\r\n name: pgsql\r\n subPath: postgres\r\n volumeClaimTemplates:\r\n - metadata:\r\n name: pgsql\r\n spec:\r\n accessModes: \r\n - ReadWriteOnce\r\n resources:\r\n requests:\r\n storage: 5Gi\r\n storageClassName: rook-ceph-block", + "kubelet_volume_stats_available_bytes{namespace=\"default\",persistentvolumeclaim=\"pgsql-test-pgsql-0\"} 5.275631616e+09\r\nkubelet_volume_stats_capacity_bytes{namespace=\"default\",persistentvolumeclaim=\"pgsql-test-pgsql-0\"} 5.356126208e+09\r\nkubelet_volume_stats_inodes{namespace=\"default\",persistentvolumeclaim=\"pgsql-test-pgsql-0\"} 2.62144e+06\r\nkubelet_volume_stats_inodes_free{namespace=\"default\",persistentvolumeclaim=\"pgsql-test-pgsql-0\"} 2.620459e+06\r\nkubelet_volume_stats_inodes_used{namespace=\"default\",persistentvolumeclaim=\"pgsql-test-pgsql-0\"} 981\r\nkubelet_volume_stats_used_bytes{namespace=\"default\",persistentvolumeclaim=\"pgsql-test-pgsql-0\"} 8.0494592e+07", + "from os import statvfs\r\nfrom json 
import dumps\r\n\r\n\r\ndef get_metrics(path: str):\r\n stats = statvfs(path)\r\n\r\n return {\r\n 'available': stats.f_bavail * stats.f_bsize,\r\n 'capacity': stats.f_blocks * stats.f_bsize,\r\n 'usage': (\r\n stats.f_blocks - stats.f_bfree\r\n ) * stats.f_bsize,\r\n 'iNodes': stats.f_files,\r\n 'iNodesFree': stats.f_ffree,\r\n 'iNodesUsed': stats.f_files - stats.f_ffree,\r\n }\r\n\r\n\r\nif __name__ == '__main__':\r\n print(dumps(\r\n get_metrics('/var/lib/postgresql/data')\r\n ))", + "{\"iNodesFree\": 2620459, \"usage\": 80494592, \"iNodes\": 2621440, \"available\": 5275631616, \"capacity\": 5356126208, \"iNodesUsed\": 981}" + ] + } + }, + "metadata": { + "tags": [ + "rook", + "graduated", + "storage", + "troubleshoot" + ], + "cncfProjects": [ + "rook" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/rook/rook/pull/3128", + "repo": "https://github.com/rook/rook", + "pr": "https://github.com/kubestellar/console-kb/pull/6" + }, + "reactions": 23, + "comments": 11, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with rook installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:37:20.828Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/rook/rook-3228-cassandra-jmx-prometheus-exporter.json b/solutions/cncf-generated/rook/rook-3228-cassandra-jmx-prometheus-exporter.json new file mode 100644 index 00000000..6814f327 --- /dev/null +++ b/solutions/cncf-generated/rook/rook-3228-cassandra-jmx-prometheus-exporter.json @@ -0,0 +1,73 @@ +{ + "version": "kc-mission-v1", + "name": "rook-3228-cassandra-jmx-prometheus-exporter", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "rook: cassandra: JMX prometheus exporter", + "description": "Export prometheus metrics for cassandra in sidecar container\n\n**Description of your changes:**\nAdd jmx exporter with standard configuration to sidecar container.\n\nTest steps:\n* Start minikube with `tests/scripts/minikube.sh up`\n* Deploy cassandra from `cluster/examples/kubernetes/cassandra`\n* Get the metrics with curl\n\nThis dashboard seems ok https://grafana.com/dashboards/5408 for monitoring\n\n**Which issue is resolved by this Pull Request:**\n\n**Checklist:**\n\n[test ca", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Export prometheus metrics for cassandra in sidecar container\n\n**Description of your changes:**\nAdd jmx exporter with standard configuration to sidecar container.\n\nTest steps:\n* Start minikube with `te" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nrm -rf vendor\r\nGO111MODULE=off make vendor\r\nGO111MODULE=off make codegen\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "@nabokihms thanks a lot for your effort!\nOverall, this looks ready to merge, only very small things remaining:\n\n* Rebase on latest master and resolve merge conflict of Release Notes.\n\n* Since this PR introduces a new field, you have to regenerate the Cassandra client code (Deepcopy methods). The procedure I followed is:\n\n```bash\nrm -rf vendor\nGO111MODULE=off make vendor\nGO111MODULE=off make codegen\n```\n\n* The `test.go` file has a newline has changed (newline) while it shouldn't be touched by this PR. Can you revert that change?\n\n* Add some commands to help the user restart their Cassandra Cluster in the event of a ConfigMap change:\n\n```bash\nNAMESPACE=\nCLUSTER=\n\nRACKS=$(kubectl get sts -n ${NAMESPACE} -l \"cassandra.rook.io/cluster=${CLUSTER}\")\necho ${RACKS} | xargs -n1 kubectl rollout restart -n ${NAMESPACE}\n```\n\nTo make it more comfortable, I included these changes in a review branch, so you can see what I mean:\nhttps://github.com/yanniszark/rook/tree/cassandra-jmx-metrics-review\n\nAfter that, we should be ready to merge!", + "codeSnippets": [ + "rm -rf vendor\r\nGO111MODULE=off make vendor\r\nGO111MODULE=off make codegen", + "rm -rf vendor\r\nGO111MODULE=off make vendor\r\nGO111MODULE=off make codegen", + "NAMESPACE=\r\nCLUSTER=\r\n\r\nRACKS=$(kubectl get sts -n ${NAMESPACE} -l \"cassandra.rook.io/cluster=${CLUSTER}\")\r\necho ${RACKS} | xargs -n1 kubectl rollout restart -n ${NAMESPACE}" + ] + } + }, + "metadata": { + "tags": [ + "rook", + "graduated", + "storage", + "troubleshoot" + ], + "cncfProjects": [ + "rook" + ], + "targetResourceKinds": [], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/rook/rook/pull/3228", + "repo": "https://github.com/rook/rook" + }, + "reactions": 11, + "comments": 14, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with rook installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:37:24.013Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/spin/spin-786-feat-bump-bindle-to-v0-9-0-rc-1.json b/solutions/cncf-generated/spin/spin-786-feat-bump-bindle-to-v0-9-0-rc-1.json new file mode 100644 index 00000000..fce3d448 --- /dev/null +++ b/solutions/cncf-generated/spin/spin-786-feat-bump-bindle-to-v0-9-0-rc-1.json @@ -0,0 +1,94 @@ +{ + "version": "kc-mission-v1", + "name": "spin-786-feat-bump-bindle-to-v0-9-0-rc-1", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "spin: feat: bump bindle to v0.9.0-rc.1", + "description": "resolve https://github.com/fermyon/spin/issues/689\n\n**Setup**\n\n```\n# clone this PR and build spin\ncargo build\n\n# clone bindle dockerize PR https://github.com/deislabs/bindle/pull/343 and build a docker image\ndocker build -t bindle .\n\n# create a bindle directory and a key\nBINDLE_TEMP=$(mktemp -d)\necho $BINDLE_TEMP\nexport BINDLE_KEYRING=$BINDLE_TEMP/keyring.toml\n./target/debug/bindle keys create \"VishnuJin\" -f $BINDLE_TEMP/secret_keys.toml\n\n# start bindle server\ndocker run --name b", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "resolve https://github.com/fermyon/spin/issues/689\n\n**Setup**\n\n```\n# clone this PR and build spin\ncargo build\n\n# clone bindle dockerize PR https://github.com/deislabs/bindle/pull/343 and build a docke" + }, + { + "title": "Build.", + "description": "Build." + }, + { + "title": "Run the server.", + "description": "Run the server." + }, + { + "title": "Test `/healthz`.", + "description": "Test `/healthz`." + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n# clone this PR and build spin\r\ncargo build\r\n\r\n# clone bindle dockerize PR https://github.com/deislabs/bindle/pull/343 and build a docker image\r\ndocker build -t bindle .\r\n\r\n# create a bindle directory and a key\r\nBINDLE_TEMP=$(mktemp -d)\r\necho $BINDLE_TEMP\r\nexport BINDLE_KEYRING=$BINDLE_TEMP/keyring.toml\r\n./target/debug/bindle keys create \"VishnuJin\" -f $BINDLE_TEMP/secret_keys.toml\r\n\r\n# start bindle server\r\ndocker run --name bindle -d --restart=unless-stopped -e RUST_LOG=debug -v\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/deislabs/bindle/pull/353. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "resolve https://github.com/deislabs/bindle/issues/350\nref https://github.com/fermyon/spin/pull/786#issuecomment-1283146177\n\n**Test step**\n1. Build.\n```\n> make build\n```\n2. Run the server.\n```\n> ./target/debug/bindle-server --unauthenticated\n```\n3. 
Test `/healthz`.\n```\n> curl -i http://localhost:8080/healthz\nHTTP/1.1 200 OK\ncontent-type: application/json\ncontent-length: 38\ndate: Thu, 27 Oct 2022 11:37:31 GMT\n\n{\"status\":\"OK\",\"version\":\"0.9.0-rc.1\"}\n```", + "codeSnippets": [ + "# clone this PR and build spin\r\ncargo build\r\n\r\n# clone bindle dockerize PR https://github.com/deislabs/bindle/pull/343 and build a docker image\r\ndocker build -t bindle .\r\n\r\n# create a bindle directory and a key\r\nBINDLE_TEMP=$(mktemp -d)\r\necho $BINDLE_TEMP\r\nexport BINDLE_KEYRING=$BINDLE_TEMP/keyring.toml\r\n./target/debug/bindle keys create \"VishnuJin\" -f $BINDLE_TEMP/secret_keys.toml\r\n\r\n# start bindle server\r\ndocker run --name bindle -d --restart=unless-stopped -e RUST_LOG=debug -v", + "# clone this PR and build spin\r\ncargo build\r\n\r\n# clone bindle dockerize PR https://github.com/deislabs/bindle/pull/343 and build a docker image\r\ndocker build -t bindle .\r\n\r\n# create a bindle directory and a key\r\nBINDLE_TEMP=$(mktemp -d)\r\necho $BINDLE_TEMP\r\nexport BINDLE_KEYRING=$BINDLE_TEMP/keyring.toml\r\n./target/debug/bindle keys create \"VishnuJin\" -f $BINDLE_TEMP/secret_keys.toml\r\n\r\n# start bindle server\r\ndocker run --name bindle -d --restart=unless-stopped -e RUST_LOG=debug -v $BINDLE_TEMP:/bindle-data -p 8080:8080 bindle\r\n# show bindle server logs\r\ndocker logs bindle -f", + "# export BINDLE_TEMP to the folder which we created in the last terminal\r\nexport BINDLE_TEMP=\r\n\r\nexport BINDLE_KEYRING_FILE=$BINDLE_TEMP/keyring.toml\r\nexport BINDLE_SECRET_FILE=$BINDLE_TEMP/secret_keys.toml\r\nexport BINDLE_LABEL=\"VishnuJin\"\r\n\r\n# create a spin app\r\n./target/debug/spin new http-rust myapp\r\n\r\n# build spin app\r\ncd myapp\r\n# remember to update spin_sdk version to v0.5.0 before building it\r\n../target/debug/spin build\r\n\r\n# run spin bindle prepare\r\nexport STAGING_DIR=$(mktemp -d)\r\n../target/debug/spin bindle prepare --staging-dir $STAGING_DIR", + "export BINDLE_URL=http://localhost:8080/v1\r\n../target/debug/spin bindle push", + "# start consul\r\nconsul agent -dev\r\n\r\n# start nomad\r\nnomad agent -dev\r\n\r\n# clone the latest hippo and start hippo\r\nexport Database__Driver=sqlite\r\nexport ConnectionStrings__Database=\"Data Source=hippo.db;Cache=Shared\"\r\nexport ConnectionStrings__Bindle=\"Address=http://127.0.0.1:8080/v1\"\r\nexport Nomad__Driver=\"raw_exec\"\r\nexport Jwt__Key=\"ceci n'est pas une jeton\"\r\nexport Jwt__Issuer=\"localhost\"\r\nexport Jwt__Audience=\"localhost\"\r\ncd src/Web\r\ndotnet build\r\ndotnet run\r\n\r\n# Run spin deploy in myapp\r\nexport HIPPO_USERNAME=\"admin\"\r\nexport HIPPO_PASSWORD=\"p@ssword\"\r\nexport HIPPO_URL=\"http://localhost:5309\"\r\n../target/debug/spin deploy --deploy-existing-bindle" + ] + } + }, + "metadata": { + "tags": [ + "spin", + "sandbox", + "runtime", + "troubleshoot" + ], + "cncfProjects": [ + "spin" + ], + "targetResourceKinds": [ + "Secret" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/spinframework/spin/pull/786", + "repo": "https://github.com/spinframework/spin", + "pr": "https://github.com/deislabs/bindle/pull/353" + }, + "reactions": 2, + "comments": 12, + "synthesizedBy": "regex", + "qualityScore": 73 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with spin installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:43:17.549Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/strimzi/strimzi-11210-add-support-for-volumeattributesclassname-on-persistent-storage.json b/solutions/cncf-generated/strimzi/strimzi-11210-add-support-for-volumeattributesclassname-on-persistent-storage.json new file mode 100644 index 00000000..7a7e342a --- /dev/null +++ b/solutions/cncf-generated/strimzi/strimzi-11210-add-support-for-volumeattributesclassname-on-persistent-storage.json @@ -0,0 +1,79 @@ +{ + "version": "kc-mission-v1", + "name": "strimzi-11210-add-support-for-volumeattributesclassname-on-persistent-storage", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "strimzi: Add support for volumeAttributesClassName on persistent storage", + "description": "Currently, it is not possible to set the volumeAttributesClassName in the generated PVC using the persistent storage type. This enables that feature by adding a volumeAttributesClass field to the persistent storage that maps to the PVC.\n\nChanges to the StorageDiff was required because it fails when a field is changed other than si", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Currently, it is not possible to set the volumeAttributesClassName in the generated PVC using the persistent storage type. This enables that feature by adding a volumeAttributesClass field to the pers" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nkind: Cluster\r\napiVersion: kind.x-k8s.io/v1alpha4\r\nname: strimzi\r\ncontainerdConfigPatches:\r\n- |\r\n [plugins.\"io.containerd.grpc.v1.cri\".registry]\r\n config_path = \"/etc/containerd/certs.d\"\r\nfeatureGates:\r\n VolumeAttributesClass: true # <-- this\r\nruntimeConfig:\r\n storage.k8s.io/v1beta1: \"true\"\r\nnodes:\r\n- role: control-plane\r\n- role: worker\r\n- role: worker\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "> So I'm writing my tests in `JbodStorageMockTest`. I have a few non-trivial things I would like to modify and understand.\n>\n> 1. I would like to add `--feature-gates=VolumeAttributesClass=true` to the command line argument to `MockKube3`s kube-apiserver container. I have changes locally like so in the constructor\n> ```java\n> this.apiServer = new ApiServerContainer<>().withEtcdImage(DockerImageName.parse(ETCD_IMAGE))\n> .withCreateContainerCmdModifier(cmd -> {\n> String[] cmdParts = cmd.getCmd();\n> String[] newCmdParts = Arrays.copyOf(cmdParts, cmdParts.length + 1);\n> newCmdParts[cmdParts.length] = \"--feature-gates=VolumeAttributesClass=true\";\n> cmd.withCmd(newCmdParts);\n> });\n> ```\n>\n> I would like to modify `MockKube3` to have a cleaner API, say adding an `withExtraCmdArgs` method or similar. Would appreciate more opinions.\n> \n> 2. Since I'm updating the PVCs programmatically, the updates happen very quickly. 
When I update the PVC with VAC before it is _Bound_ I get\n> ```\n> Forbidden: spec is immutable after creation except resources.requests and volumeAttributesClassName for bound claims\n> ```\n> How do I wait for the PVC to be bound in the tests?\n> \n> \n> I can push my current changes if you'd like to get the full picture.\n\nWhile the first point I brought up isn't relevant anymore, I am still looking for solution", + "codeSnippets": [ + "kind: Cluster\r\napiVersion: kind.x-k8s.io/v1alpha4\r\nname: strimzi\r\ncontainerdConfigPatches:\r\n- |\r\n [plugins.\"io.containerd.grpc.v1.cri\".registry]\r\n config_path = \"/etc/containerd/certs.d\"\r\nfeatureGates:\r\n VolumeAttributesClass: true # <-- this\r\nruntimeConfig:\r\n storage.k8s.io/v1beta1: \"true\"\r\nnodes:\r\n- role: control-plane\r\n- role: worker\r\n- role: worker", + "kind: Cluster\r\napiVersion: kind.x-k8s.io/v1alpha4\r\nname: strimzi\r\ncontainerdConfigPatches:\r\n- |\r\n [plugins.\"io.containerd.grpc.v1.cri\".registry]\r\n config_path = \"/etc/containerd/certs.d\"\r\nfeatureGates:\r\n VolumeAttributesClass: true # <-- this\r\nruntimeConfig:\r\n storage.k8s.io/v1beta1: \"true\"\r\nnodes:\r\n- role: control-plane\r\n- role: worker\r\n- role: worker", + "I would like to modify `MockKube3` to have a cleaner API, say adding an `withExtraCmdArgs` method or similar. Would appreciate more opinions.\r\n2. Since I'm updating the PVCs programmatically, the updates happen very quickly. When I update the PVC with VAC before it is _Bound_ I get", + "How do I wait for the PVC to be bound in the tests?\r\n\r\nI can push my current changes if you'd like to get the full picture.\nHmm, I did not realize this would need to have the flag enabled :-/.\nIs this something I should keep looking at or should it be paused for now? Maybe until `VolumeAttributesClass` is out of beta and doesn't require a feature flag?\n> Is this something I should keep looking at or should it be paused for now? Maybe until `VolumeAttributesClass` is out of beta and doesn't require a feature flag?\r\n\r\nHi @venkatesh2090 just to check my understanding here. The changes you're suggesting here, if the user hasn't enabled `VolumeAttributesClass` in their Kubernetes (or they are using an older version that doesn't have it) it won't break anything right? The field will just be ignored by Kubernetes?\r\n\r\nBut to test this feature we need to enable a feature flag in MockKube3?\nYes, that is right. I would have to enable that feature flag in MockKube to test this feature. \nI don't think there is an issue going ahead, since MockKube is just used for testing. However, I wonder whether many people would use this feature before it is GA in Kubernetes. Is it something you were hoping to use right away, or are you happy to wait for it to become GA @venkatesh2090 ?\r\n\r\nI'm afraid I'm not familiar enough with the MockKube3 tests to answer your other question.\r\n\r\nTagging @strimzi/maintainers to see what others think.\n> Is it something you were hoping to use right away, or are you happy to wait for it to become GA @venkatesh2090 ?\n\nNo that's fine. I don't need this feature. I just raised a PR because I saw an open issue 🙂.\n\nFeel free to tag me or take over this PR when the feature goes GA. \n> No that's fine. I don't need this feature. I just raised a PR because I saw an open issue 🙂.\r\n\r\nThat's great, we really value your contribution.\r\n\r\nOk so I think we have two options. 
If you want to continue working on this you could switch up the tests to use Mockito instead of MockKube3. That way we don't need to worry about the feature flag. Or you can close this PR and we will reopen it and tag you once the feature is GA.\r\n\r\nIf you are keen to pick up another task feel free to grab one, or you can drop a message in the the `strimzi-dev` channel in the CNCF Slack org if you want help selecting one :) \nThanks. I'll close it for now and do it properly when it is GA\nVolumeAttributesClass has gone GA since Kubernetes 1.34 and this is a feature that we'd really like so that we could codify higher IOPS/throughput for our Kafka volumes rather than having to make manual modifications to them. It would be really appreciated @venkatesh2090 if you had some time to revive this PR 🙏 \nSure @andyspiers. I will take a look over the weekend.\n> So I'm writing my tests in `JbodStorageMockTest`. I have a few non-trivial things I would like to modify and understand.\r\n>\r\n> 1. I would like to add `--feature-gates=VolumeAttributesClass=true` to the command line argument to `MockKube3`s kube-apiserver container. I have changes locally like so in the constructor\r\n>", + ">\r\n> I would like to modify `MockKube3` to have a cleaner API, say adding an `withExtraCmdArgs` method or similar. Would appreciate more opinions.\r\n> \r\n> 2. Since I'm updating the PVCs programmatically, the updates happen very quickly. When I update the PVC with VAC before it is _Bound_ I get\r\n>" + ] + } + }, + "metadata": { + "tags": [ + "strimzi", + "incubating", + "storage", + "troubleshoot", + "needs-review" + ], + "cncfProjects": [ + "strimzi" + ], + "targetResourceKinds": [ + "Pod", + "Role" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/strimzi/strimzi-kafka-operator/pull/11210", + "repo": "https://github.com/strimzi/strimzi-kafka-operator" + }, + "reactions": 3, + "comments": 25, + "synthesizedBy": "regex", + "qualityScore": 68 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with strimzi installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:37:53.909Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/strimzi/strimzi-6180-fix-renewal-of-user-provided-cas.json b/solutions/cncf-generated/strimzi/strimzi-6180-fix-renewal-of-user-provided-cas.json new file mode 100644 index 00000000..720e91d8 --- /dev/null +++ b/solutions/cncf-generated/strimzi/strimzi-6180-fix-renewal-of-user-provided-cas.json @@ -0,0 +1,83 @@ +{ + "version": "kc-mission-v1", + "name": "strimzi-6180-fix-renewal-of-user-provided-cas", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "strimzi: Fix renewal of user provided CAs", + "description": "This PR fixes #5466.\nToday, when a user provides his own CA certificate (cluster or client) but then he tries to follow the [renew process](https://strim", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This PR fixes #5466.\nToday, when a user provides his own CA certificate (cluster or client) but then he tries to follow the [renew process](https://strim" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\nannotations:\r\n clients-ca-thumbprint: hCyUdeqWt6gXDUKYdAtaFYDiPoZlWAwCjW7liqtWkps=\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/strimzi/strimzi-kafka-operator/pull/6240. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "This is the documentation related update for PR #6180 which fixes issues #5466.\nThe PR adds more information about certification generations to be added to the Secret for handling your own CA certificates and corresponding renewal.", + "codeSnippets": [ + "annotations:\r\n clients-ca-thumbprint: hCyUdeqWt6gXDUKYdAtaFYDiPoZlWAwCjW7liqtWkps=", + "annotations:\r\n clients-ca-thumbprint: hCyUdeqWt6gXDUKYdAtaFYDiPoZlWAwCjW7liqtWkps=", + "annotations:\r\n clients-ca-thumbprint: hCyUdeqWt6gXDUKYdAtaFYDiPoZlWAwCjW7liqtWkps=\r\n cluster-ca-thumbprint: DoNswf/bRfs6nF1IVx7PhbOVXKvJvjv/wOxuQY8eoTM=" + ] + } + }, + "metadata": { + "tags": [ + "strimzi", + "incubating", + "storage", + "troubleshoot" + ], + "cncfProjects": [ + "strimzi" + ], + "targetResourceKinds": [ + "Pod", + "Secret", + "Role", + "Node" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/strimzi/strimzi-kafka-operator/pull/6180", + "repo": "https://github.com/strimzi/strimzi-kafka-operator", + "pr": "https://github.com/strimzi/strimzi-kafka-operator/pull/6240" + }, + "reactions": 1, + "comments": 11, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with strimzi installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:37:57.425Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/submariner/submariner-1202-add-e2e-tests-for-external-connectivity-use-case.json b/solutions/cncf-generated/submariner/submariner-1202-add-e2e-tests-for-external-connectivity-use-case.json new file mode 100644 index 00000000..116fa08e --- /dev/null +++ b/solutions/cncf-generated/submariner/submariner-1202-add-e2e-tests-for-external-connectivity-use-case.json @@ -0,0 +1,80 @@ +{ + "version": "kc-mission-v1", + "name": "submariner-1202-add-e2e-tests-for-external-connectivity-use-case", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "submariner: Add e2e tests for external connectivity use case", + "description": "This PR adds e2e tests for external connectivity use case (see details below).\nThis PR only focus on connectivity that can be achieved without modification for submariner side.\n\nUse case about source IP considerations, which will be part of implementaion of globalnet, is out of the scope of this PR. Tests for such use cases will be added on top of this PR, later.\n\nDependency:\n\nFixes: #1201 \n\n
\n\nExternal applications can connect to remote cluster via local cluster by using submariner, if ", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This PR adds e2e tests for external connectivity use case (see details below).\nThis PR only focus on connectivity that can be achieved without modification for submariner side.\n\nUse case about source " + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n$ echo \"replace github.com/submariner-io/shipyard => ../shipyard\" >> go.mod\r\n$ go mod vendor\r\n$ make e2e-external\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/submariner-io/shipyard/pull/499. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "This PR is to add docker and network pod functions to e2e framework. These functions are originally needed for https://github.com/submariner-io/submariner/pull/1202 , however, as a result of discussion, we came to conclusion that it would be nice to add them to this framework to make room to leverage from other tests.", + "codeSnippets": [ + "$ echo \"replace github.com/submariner-io/shipyard => ../shipyard\" >> go.mod\r\n$ go mod vendor\r\n$ make e2e-external", + "$ echo \"replace github.com/submariner-io/shipyard => ../shipyard\" >> go.mod\r\n$ go mod vendor\r\n$ make e2e-external", + "(cd ../shipyard; make images)\r\n\r\necho \"replace github.com/submariner-io/shipyard => ../shipyard\" >> go.mod\r\ngo mod vendor\r\n\r\nexport BASE_BRANCH=dev\r\nexport PLUGIN=/go/src/github.com/submariner-io/submariner/scripts/e2e/external/hook \r\nexport FOCUS=\"\\[external-dataplane\\]\" \r\nexport SKIP=\"\\[dataplane\\]\"\r\n\r\nmake e2e\r\n\r\nmake cleanup" + ] + } + }, + "metadata": { + "tags": [ + "submariner", + "sandbox", + "networking", + "troubleshoot" + ], + "cncfProjects": [ + "submariner" + ], + "targetResourceKinds": [ + "Node" + ], + "difficulty": "beginner", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/submariner-io/submariner/pull/1202", + "repo": "https://github.com/submariner-io/submariner", + "pr": "https://github.com/submariner-io/shipyard/pull/499" + }, + "reactions": 0, + "comments": 11, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with submariner installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:43:35.864Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/virtual-kubelet/virtual-kubelet-258-consolidate-helm-charts.json b/solutions/cncf-generated/virtual-kubelet/virtual-kubelet-258-consolidate-helm-charts.json new file mode 100644 index 00000000..21f3e3b6 --- /dev/null +++ b/solutions/cncf-generated/virtual-kubelet/virtual-kubelet-258-consolidate-helm-charts.json @@ -0,0 +1,83 @@ +{ + "version": "kc-mission-v1", + "name": "virtual-kubelet-258-consolidate-helm-charts", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "virtual-kubelet: Consolidate helm charts", + "description": "Changes\n- Added an appVersion and icon to helm Chart.yaml\n- Refactored to use `required` in chart manifests rather than\n outputting an error message in notes\n- Namespaced `name` and `fullname` template partials to `vk`\n- Enabled rbac apiVersion configuration\n- Removed role-binding and service-account suffixes from resource\n names\n- Fixed bug where virtual-kubelet service account would not be\n bound to cluster role if chart was installed outside the default\n namespace\n- Removed hardcoded `azu", + "type": "deploy", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "Changes\n- Added an appVersion and icon to helm Chart.yaml\n- Refactored to use `required` in chart manifests rather than\n outputting an error message in notes\n- Namespaced `name` and `fullname` templa" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n#!/bin/bash\r\n\r\n. ./scripts/createCertAndKey.sh &&\r\n\r\nhelm upgrade --install $RELEASE_NAME ./charts/virtual-kubelet \\\r\n --set provider=azure \\\r\n --set rbac.install=true \\\r\n --set providers.azure.tenantId=$AZURE_TENANT_ID \\\r\n --set providers.azure.subscriptionId=$AZURE_SUBSCRIPTION_ID \\\r\n --set providers.azure.clientId=$AZURE_CLIENT_ID \\\r\n --set providers.azure.clientKey=$AZURE_CLIENT_SECRET \\\r\n --set providers.azure.aciResourceGroup=$AZURE_RG \\\r\n --set providers.azure.aciRegion=$ACI_REGIO\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/virtual-kubelet/virtual-kubelet/pull/281. Review the changes to understand the solution." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "This PR updates the VK AKS chart to have RBAC resources and conditionally create them. rbac.install defaults to true. \n\nI'm going to make a corresponding PR to azure cli to add a call to get the rbac status of the cluster and use that flag to drive the chart install.", + "codeSnippets": [ + "#!/bin/bash\r\n\r\n. ./scripts/createCertAndKey.sh &&\r\n\r\nhelm upgrade --install $RELEASE_NAME ./charts/virtual-kubelet \\\r\n --set provider=azure \\\r\n --set rbac.install=true \\\r\n --set providers.azure.tenantId=$AZURE_TENANT_ID \\\r\n --set providers.azure.subscriptionId=$AZURE_SUBSCRIPTION_ID \\\r\n --set providers.azure.clientId=$AZURE_CLIENT_ID \\\r\n --set providers.azure.clientKey=$AZURE_CLIENT_SECRET \\\r\n --set providers.azure.aciResourceGroup=$AZURE_RG \\\r\n --set providers.azure.aciRegion=$ACI_REGIO", + "#!/bin/bash\r\n\r\n. 
./scripts/createCertAndKey.sh &&\r\n\r\nhelm upgrade --install $RELEASE_NAME ./charts/virtual-kubelet \\\r\n --set provider=azure \\\r\n --set rbac.install=true \\\r\n --set providers.azure.tenantId=$AZURE_TENANT_ID \\\r\n --set providers.azure.subscriptionId=$AZURE_SUBSCRIPTION_ID \\\r\n --set providers.azure.clientId=$AZURE_CLIENT_ID \\\r\n --set providers.azure.clientKey=$AZURE_CLIENT_SECRET \\\r\n --set providers.azure.aciResourceGroup=$AZURE_RG \\\r\n --set providers.azure.aciRegion=$ACI_REGION \\\r\n --set apiserverCert=$cert \\\r\n --set apiserverKey=$key" + ] + } + }, + "metadata": { + "tags": [ + "virtual-kubelet", + "sandbox", + "app-definition", + "deploy" + ], + "cncfProjects": [ + "virtual-kubelet" + ], + "targetResourceKinds": [ + "Pod", + "Service", + "Secret", + "Namespace", + "Role" + ], + "difficulty": "intermediate", + "issueTypes": [ + "deploy" + ], + "maturity": "sandbox", + "sourceUrls": { + "issue": "https://github.com/virtual-kubelet/virtual-kubelet/pull/258", + "repo": "https://github.com/virtual-kubelet/virtual-kubelet", + "pr": "https://github.com/virtual-kubelet/virtual-kubelet/pull/281" + }, + "reactions": 2, + "comments": 11, + "synthesizedBy": "regex", + "qualityScore": 70 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with virtual-kubelet installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:43:06.576Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/vitess/vitess-12194-bug-fix-cache-filtered-out-tablets-in-topology-watcher-to-avoid-unn.json b/solutions/cncf-generated/vitess/vitess-12194-bug-fix-cache-filtered-out-tablets-in-topology-watcher-to-avoid-unn.json new file mode 100644 index 00000000..8465da85 --- /dev/null +++ b/solutions/cncf-generated/vitess/vitess-12194-bug-fix-cache-filtered-out-tablets-in-topology-watcher-to-avoid-unn.json @@ -0,0 +1,82 @@ +{ + "version": "kc-mission-v1", + "name": "vitess-12194-bug-fix-cache-filtered-out-tablets-in-topology-watcher-to-avoid-unn", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "vitess: Bug fix: Cache filtered out tablets in topology watcher to avoid unnecessary GetTablet calls to topo", + "description": "This addresses https://github.com/vitessio/vitess/issues/12179 by moving the point at which the topology watcher applies the provided `TabletFilter` in order to avoid too many `GetTablet` calls to the topology server. A failing test was added to confirm that this PR fixes the referenced issue", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This addresses https://github.com/vitessio/vitess/issues/12179 by moving the point at which the topology watcher applies the provided `TabletFilter` in order to avoid too many `GetTablet` calls to the" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n$ sha=\"b9ce758c6e60bffeea025676bd15d5a0b17c0c70\"\r\n$ g cherry-pick $sha \r\n$ g commit --amend -m \"backport of https://github.com/vitessio/vitess/pull/12194\"\n```" + }, + { + "title": "Review the fix", + "description": "The fix was implemented in https://github.com/slackhq/vitess/pull/43. Review the changes to understand the solution." 
+ }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "## Description\n\nThis is a backport of https://github.com/vitessio/vitess/pull/12194 generated by:\n```\n$ sha=\"b9ce758c6e60bffeea025676bd15d5a0b17c0c70\"\n$ g cherry-pick $sha \n$ g commit --amend -m \"backport of https://github.com/vitessio/vitess/pull/12194\"\n```", + "codeSnippets": [ + "$ sha=\"b9ce758c6e60bffeea025676bd15d5a0b17c0c70\"\r\n$ g cherry-pick $sha \r\n$ g commit --amend -m \"backport of https://github.com/vitessio/vitess/pull/12194\"", + "$ sha=\"b9ce758c6e60bffeea025676bd15d5a0b17c0c70\"\r\n$ g cherry-pick $sha \r\n$ g commit --amend -m \"backport of https://github.com/vitessio/vitess/pull/12194\"" + ] + } + }, + "metadata": { + "tags": [ + "vitess", + "graduated", + "storage", + "troubleshoot", + "type--bug", + "component--cluster-management", + "type--performance" + ], + "cncfProjects": [ + "vitess" + ], + "targetResourceKinds": [ + "Deployment" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "graduated", + "sourceUrls": { + "issue": "https://github.com/vitessio/vitess/pull/12194", + "repo": "https://github.com/vitessio/vitess", + "pr": "https://github.com/slackhq/vitess/pull/43" + }, + "reactions": 3, + "comments": 3, + "synthesizedBy": "regex", + "qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with vitess installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:35:54.320Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/volcano/volcano-2203-fix-deploy-add-helm-pre-delete-hook-to-delete-orphan-resources.json b/solutions/cncf-generated/volcano/volcano-2203-fix-deploy-add-helm-pre-delete-hook-to-delete-orphan-resources.json new file mode 100644 index 00000000..91f0742e --- /dev/null +++ b/solutions/cncf-generated/volcano/volcano-2203-fix-deploy-add-helm-pre-delete-hook-to-delete-orphan-resources.json @@ -0,0 +1,77 @@ +{ + "version": "kc-mission-v1", + "name": "volcano-2203-fix-deploy-add-helm-pre-delete-hook-to-delete-orphan-resources", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "volcano: fix(deploy): add helm pre delete hook to delete orphan resources", + "description": "This PR is trying to resolve https://github.com/volcano-sh/volcano/issues/2191, it uses helm hooks to uninstall orphan resources before uninstalling the helm package.\n\nIt can perfectly solve the above issue when using helm to deploy, but there still exists two problems:\n1. A third-party kubectl dependency will be introduced\n2. 
Installing volcano through `installer/volcano-deployment.yaml` will not take effect, which means issue https://github.com/volcano-sh/volcano/issues/2079, https://github.co", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "This PR is trying to resolve https://github.com/volcano-sh/volcano/issues/2191, it uses helm hooks to uninstall orphan resources before uninstalling the helm package.\n\nIt can perfectly solve the above" + }, + { + "title": "helm is usually the more recommended deployment method(personally preferred w...", + "description": "helm is usually the more recommended deployment method(personally preferred way), this pr can completely solve the problem of helm deployment." + }, + { + "title": "I think it is difficult to solve it in one file when installing by volcano-de...", + "description": "I think it is difficult to solve it in one file when installing by volcano-deployment.yaml. The essential reason is that deleting these configurations requires the permission of service account, but uninstalling resources in one file cannot ensure the deletion order of sa and other resources, while this is guaranteed by the helm pre-delete hooks." + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." + } + ], + "resolution": { + "summary": "> For the second reason, it doesn't feel like this change is universally applicable. Preferably there is a way to remove the residual webhook deployed both ways.\n\nYes, my personal opinion is:\n1. helm is usually the more recommended deployment method(personally preferred way), this pr can completely solve the problem of helm deployment.\n2. I think it is difficult to solve it in one file when installing by volcano-deployment.yaml. The essential reason is that deleting these configurations requires the permission of service account, but uninstalling resources in one file cannot ensure the deletion order of sa and other resources, while this is guaranteed by the helm pre-delete hooks.\n\nThe two way I think may be solved is:\n* Tell the user to manually delete the webhook residual configuration when using volcano-deployment.yaml.\n* It may be possible to declare the webhook resource directly in yaml, but I'm not sure if it will work, and it needs a major modification.\n\nBTW, I don't know if this is the final solution, but it solved my helm deployment problem. So looking forward to more voices.", + "codeSnippets": [] + } + }, + "metadata": { + "tags": [ + "volcano", + "incubating", + "orchestration", + "troubleshoot", + "lgtm", + "size-m" + ], + "cncfProjects": [ + "volcano" + ], + "targetResourceKinds": [ + "Deployment" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/volcano-sh/volcano/pull/2203", + "repo": "https://github.com/volcano-sh/volcano" + }, + "reactions": 3, + "comments": 10, + "synthesizedBy": "regex", + "qualityScore": 63 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with volcano installed or the issue environment reproducible." 
+ }, + "security": { + "scannedAt": "2026-03-02T19:38:25.028Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +} diff --git a/solutions/cncf-generated/volcano/volcano-4581-feat-add-ray-plugin-for-job.json b/solutions/cncf-generated/volcano/volcano-4581-feat-add-ray-plugin-for-job.json new file mode 100644 index 00000000..732c86b3 --- /dev/null +++ b/solutions/cncf-generated/volcano/volcano-4581-feat-add-ray-plugin-for-job.json @@ -0,0 +1,82 @@ +{ + "version": "kc-mission-v1", + "name": "volcano-4581-feat-add-ray-plugin-for-job", + "missionClass": "solution", + "author": "KubeStellar Bot", + "authorGithub": "kubestellar", + "mission": { + "title": "volcano: feat: add ray plugin for job", + "description": "--- # without ray plugin\napiVersion: batch.volcano.sh/v1alpha1\nkind: Job\nmetadata:\n name: ray-cluster-job\nspec:\n minAvailable: 3\n schedulerName: volcano\n plugins:\n svc: []\n queue: default\n policies:\n - event: PodEvicted\n action: RestartJob\n tasks:\n - replicas: 1\n name: head\n template:\n spec:\n containers:\n - name: head\n command:\n - sh\n - -c\n - ray start --head --block --port=6379 --", + "type": "troubleshoot", + "status": "completed", + "steps": [ + { + "title": "Understand the problem", + "description": "--- # without ray plugin\napiVersion: batch.volcano.sh/v1alpha1\nkind: Job\nmetadata:\n name: ray-cluster-job\nspec:\n minAvailable: 3\n schedulerName: volcano\n plugins:\n svc: []\n queue: default\n po" + }, + { + "title": "Apply the configuration", + "description": "Apply the following configuration to your cluster:\n```yaml\n--- # without ray plugin\r\napiVersion: batch.volcano.sh/v1alpha1\r\nkind: Job\r\nmetadata:\r\n name: ray-cluster-job\r\nspec:\r\n minAvailable: 3\r\n schedulerName: volcano\r\n plugins:\r\n svc: []\r\n queue: default\r\n policies:\r\n - event: PodEvicted\r\n action: RestartJob\r\n tasks:\r\n - replicas: 1\r\n name: head\r\n template:\r\n spec:\r\n containers:\r\n - name: head\r\n command:\r\n - sh\r\n - -c\r\n - ray start --hea\n```" + }, + { + "title": "Verify the fix", + "description": "Confirm that the issue is resolved in your environment by testing the affected functionality." 
+ } + ], + "resolution": { + "summary": "> Sep 19 16:33:27 integration-worker3 kubelet[272]: E0919 16:33:27.945147 272 pod_workers.go:1301] \"Error syncing pod, skipping\" err=\"failed to \"StartContainer\" for \"tensorflow\" with ErrImagePull: \"failed to pull and unpack image \\\"docker.io/volcanosh/dist-mnist-tf-example:0.0.1\\\": failed to extract layer sha256:5061983e267fa42a4403de0a25f41e4ca5ba0c75db3b2a4ceae769f94035816b: write /var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/154/fs/opt/conda/lib/python3.5/site-packages/tensorflow/python/_pywrap_tensorflow_internal.so: no space left on device\"\" pod=\"x2n8bo60/tensorflow-dist-mnist-ps-0\" podUID=\"89d374cc-6b09-4498-a235-163431fcdb72\"\n\nYou can use `sudo docker system prune -a -f` or `sudo crictl rmi --prune` to release space before run a e2e.", + "codeSnippets": [ + "--- # without ray plugin\r\napiVersion: batch.volcano.sh/v1alpha1\r\nkind: Job\r\nmetadata:\r\n name: ray-cluster-job\r\nspec:\r\n minAvailable: 3\r\n schedulerName: volcano\r\n plugins:\r\n svc: []\r\n queue: default\r\n policies:\r\n - event: PodEvicted\r\n action: RestartJob\r\n tasks:\r\n - replicas: 1\r\n name: head\r\n template:\r\n spec:\r\n containers:\r\n - name: head\r\n command:\r\n - sh\r\n - -c\r\n - ray start --hea", + "--- # without ray plugin\r\napiVersion: batch.volcano.sh/v1alpha1\r\nkind: Job\r\nmetadata:\r\n name: ray-cluster-job\r\nspec:\r\n minAvailable: 3\r\n schedulerName: volcano\r\n plugins:\r\n svc: []\r\n queue: default\r\n policies:\r\n - event: PodEvicted\r\n action: RestartJob\r\n tasks:\r\n - replicas: 1\r\n name: head\r\n template:\r\n spec:\r\n containers:\r\n - name: head\r\n command:\r\n - sh\r\n - -c\r\n - ray start --head --block --port=6379 --dashboard-host=0.0.0.0;\r\n image: rayproject/ray:latest-py311-cpu\r\n ports:\r\n - containerPort: 8265\r\n name: dashboard\r\n - containerPort: 6379\r\n name: gcs\r\n - containerPort: 10001\r\n name: client\r\n resources: {}\r\n restartPolicy: OnFailure\r\n - replicas: 2\r\n name: worker\r\n template:\r\n spec:\r\n containers:\r\n - name: worker\r\n command:\r\n - sh\r\n - -c\r\n - |\r\n ray start --block --address=ray-cluster-job-head-0.ray-cluster-job:6379\r\n image: rayproject/ray:latest-py311-cpu\r\n resources: {}\r\n restartPolicy: OnFailure", + "--- # with ray plugin\r\napiVersion: batch.volcano.sh/v1alpha1\r\nkind: Job\r\nmetadata:\r\n name: ray-cluster-job\r\nspec:\r\n minAvailable: 3\r\n schedulerName: volcano\r\n plugins:\r\n ray: []\r\n svc: []\r\n policies:\r\n - event: PodEvicted\r\n action: RestartJob\r\n queue: default\r\n tasks:\r\n - replicas: 1\r\n name: head\r\n\r\n template:\r\n spec:\r\n containers:\r\n - name: head\r\n image: rayproject/ray:latest-py311-cpu\r\n resources: {}\r\n restartPolicy: OnFailure\r\n - replicas: 2\r\n name: worker\r\n template:\r\n spec:\r\n containers:\r\n - name: worker\r\n image: rayproject/ray:latest-py311-cpu\r\n resources: {}\r\n restartPolicy: Never" + ] + } + }, + "metadata": { + "tags": [ + "volcano", + "incubating", + "orchestration", + "troubleshoot", + "approved", + "lgtm", + "ok-to-test", + "size-xl" + ], + "cncfProjects": [ + "volcano" + ], + "targetResourceKinds": [ + "Pod", + "Service", + "Job", + "Node" + ], + "difficulty": "intermediate", + "issueTypes": [ + "troubleshoot" + ], + "maturity": "incubating", + "sourceUrls": { + "issue": "https://github.com/volcano-sh/volcano/pull/4581", + "repo": "https://github.com/volcano-sh/volcano" + }, + "reactions": 3, + "comments": 21, + "synthesizedBy": "regex", + 
"qualityScore": 61 + }, + "prerequisites": { + "kubernetes": ">=1.24", + "tools": [ + "kubectl" + ], + "description": "A running Kubernetes cluster with volcano installed or the issue environment reproducible." + }, + "security": { + "scannedAt": "2026-03-02T19:38:26.142Z", + "scannerVersion": "cncf-gen-2.0.0", + "sanitized": true, + "findings": [] + } +}