diff --git a/README.md b/README.md
index 126b52ada..c2c784dab 100644
--- a/README.md
+++ b/README.md
@@ -403,6 +403,7 @@ The following sets of tools are available (toolsets marked with ✓ in the Defau
| config | View and manage the current local Kubernetes configuration (kubeconfig) | ✓ |
| core | Most common tools for Kubernetes management (Pods, Generic Resources, Events, etc.) | ✓ |
| kcp | Manage kcp workspaces and multi-tenancy features | |
+| openshift | OpenShift-specific tools for cluster management and troubleshooting, check the [OpenShift documentation](docs/OPENSHIFT.md) for more details. | |
| ossm | Most common tools for managing OSSM, check the [OSSM documentation](https://github.com/openshift/openshift-mcp-server/blob/main/docs/OSSM.md) for more details. | |
| kubevirt | KubeVirt virtual machine management tools | |
| observability | Cluster observability tools for querying Prometheus metrics and Alertmanager alerts | ✓ |
@@ -696,6 +697,25 @@ Common use cases:
+
+
+openshift
+
+- **plan_mustgather** - Plan for collecting a must-gather archive from an OpenShift cluster. Must-gather is a tool for collecting cluster data related to debugging and troubleshooting like logs, Kubernetes resources, etc.
+  - `node_name` (`string`) - Optional node to run the must-gather pod on. If not provided, a random control-plane node will be selected automatically
+  - `node_selector` (`string`) - Optional node label selector to use, only relevant when specifying a command and image that need to capture data on a set of cluster nodes simultaneously
+  - `host_network` (`boolean`) - Optionally run the must-gather pods in the host network of the node. This is only relevant if a specific gather image needs to capture host-level data
+  - `gather_command` (`string`) - Optionally specify a custom gather command to run a specialized script, e.g. /usr/bin/gather_audit_logs (default: /usr/bin/gather)
+  - `all_component_images` (`boolean`) - Optional. When enabled, collects and runs multiple must-gathers for all operators and components on the cluster that have an annotated must-gather image available
+  - `images` (`array`) - Optional list of images to use for gathering custom information about specific operators or cluster components. If not specified, OpenShift's default must-gather image will be used
+  - `source_dir` (`string`) - Optionally set a specific directory inside the pod from which gathered data will be copied (default: /must-gather)
+  - `timeout` (`string`) - Timeout of the gather process, e.g. 30s, 6m20s, or 2h10m30s
+  - `namespace` (`string`) - Optionally specify an existing privileged namespace where must-gather pods should run. If not provided, a temporary namespace will be created
+  - `keep_resources` (`boolean`) - Optionally retain all temporary resources when the must-gather completes; otherwise the plan will advise cleaning up the temporary resources it created
+  - `since` (`string`) - Optionally collect only logs newer than a relative duration like 5s, 2m5s, or 3h6m10s. If unspecified, all available logs will be collected
+
+
+
diff --git a/docs/OPENSHIFT.md b/docs/OPENSHIFT.md
new file mode 100644
index 000000000..67f9b9486
--- /dev/null
+++ b/docs/OPENSHIFT.md
@@ -0,0 +1,219 @@
+# OpenShift Toolset
+
+This toolset provides OpenShift-specific prompts for cluster management and troubleshooting.
+
+## Prompts
+
+### plan_mustgather
+
+Plan for collecting a must-gather archive from an OpenShift cluster. Must-gather is a tool for collecting cluster data related to debugging and troubleshooting like logs, Kubernetes resources, and more.
+
+This prompt generates YAML manifests for the must-gather resources that can be applied to the cluster.
+
+**Arguments:**
+- `node_name` (optional) - Specific node name to run must-gather pod on
+- `node_selector` (optional) - Node selector in `key=value,key2=value2` format to filter nodes for the pod
+- `source_dir` (optional) - Custom gather directory inside pod (default: `/must-gather`)
+- `namespace` (optional) - Privileged namespace to use for must-gather (auto-generated if not specified)
+- `gather_command` (optional) - Custom gather command e.g. `/usr/bin/gather_audit_logs` (default: `/usr/bin/gather`)
+- `timeout` (optional) - Timeout duration for gather command (e.g., `30m`, `1h`)
+- `since` (optional) - Only gather data newer than this duration (e.g., `5s`, `2m5s`, or `3h6m10s`), defaults to all data
+- `host_network` (optional) - Use host network for must-gather pod (`true`/`false`)
+- `keep_resources` (optional) - Keep pod resources after collection (`true`/`false`, default: `false`)
+- `all_component_images` (optional) - Include must-gather images from all installed operators (`true`/`false`)
+- `images` (optional) - Comma-separated list of custom must-gather container images
+
+**Examples:**
+```
+# Basic must-gather collection
+{}
+
+# Collect with custom timeout and since
+{
+ "timeout": "30m",
+ "since": "1h"
+}
+
+# Collect from all component images
+{
+ "all_component_images": "true"
+}
+
+# Collect from specific operator image
+{
+ "images": "registry.redhat.io/openshift-logging/cluster-logging-rhel9-operator@sha256:..."
+}
+```
+
+## Enable the OpenShift Toolset
+
+### Option 1: Command Line
+
+```bash
+kubernetes-mcp-server --toolsets core,config,helm,openshift
+```
+
+### Option 2: Configuration File
+
+```toml
+toolsets = ["core", "config", "helm", "openshift"]
+```
+
+### Option 3: MCP Client Configuration
+
+```json
+{
+ "mcpServers": {
+ "kubernetes": {
+ "command": "npx",
+ "args": ["-y", "kubernetes-mcp-server@latest", "--toolsets", "core,config,helm,openshift"]
+ }
+ }
+}
+```
+
+## Prerequisites
+
+The OpenShift toolset requires:
+
+1. **OpenShift cluster** - These prompts are designed for OpenShift and automatically detect the cluster type
+2. **Proper RBAC** - The user/service account must have permissions to:
+ - Create namespaces
+ - Create service accounts
+ - Create cluster role bindings
+ - Create pods with privileged access
+ - List ClusterOperators and ClusterServiceVersions (for `all_component_images`)
+
+## How It Works
+
+### Must-Gather Plan Generation
+
+The `plan_mustgather` prompt generates YAML manifests for collecting diagnostic data from an OpenShift cluster:
+
+1. **Namespace** - A temporary namespace (e.g., `openshift-must-gather-xyz`) is created unless an existing namespace is specified
+2. **ServiceAccount** - A service account with cluster-admin permissions is created for the must-gather pod
+3. **ClusterRoleBinding** - Binds the service account to the cluster-admin role
+4. **Pod** - Runs the must-gather container(s) with the specified configuration
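+
+Once the gather containers finish, the collected data remains on the shared output volume, which the long-running `wait` container keeps mounted at `/must-gather`. A typical way to copy the results locally is sketched below; the namespace and pod names are generated per run, so the ones here are only placeholders:
+
+```bash
+# Copy the gathered data out of the wait container (illustrative names)
+oc cp openshift-must-gather-xxxxxx/must-gather-xxxxxx:/must-gather ./must-gather -c wait
+```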
+
+### Component Image Discovery
+
+When `all_component_images` is enabled, the prompt discovers must-gather images from:
+- **ClusterOperators** - Looks for the `operators.openshift.io/must-gather-image` annotation
+- **ClusterServiceVersions** - Checks OLM-installed operators for the same annotation
+
+### Multiple Images Support
+
+Up to 8 gather images can be run concurrently. Each image runs in a separate container within the same pod, sharing the output volume.
+
+## Common Use Cases
+
+### Basic Cluster Diagnostics
+
+Collect general cluster diagnostics:
+```json
+{}
+```
+
+### Audit Logs Collection
+
+Collect audit logs with a custom gather command:
+```json
+{
+ "gather_command": "/usr/bin/gather_audit_logs",
+ "timeout": "2h"
+}
+```
+
+### Recent Logs Only
+
+Collect logs from the last 30 minutes:
+```json
+{
+ "since": "30m"
+}
+```
+
+### Specific Operator Diagnostics
+
+Collect diagnostics for a specific operator:
+```json
+{
+ "images": "registry.redhat.io/openshift-logging/cluster-logging-rhel9-operator@sha256:..."
+}
+```
+
+### Host Network Access
+
+For gather scripts that need host-level network access:
+```json
+{
+ "host_network": "true"
+}
+```
+
+### All Component Diagnostics
+
+Collect diagnostics from all operators with must-gather images:
+```json
+{
+ "all_component_images": "true",
+ "timeout": "1h"
+}
+```
+
+## Troubleshooting
+
+### Permission Errors
+
+If you see permission warnings, ensure your user has the required RBAC permissions:
+```bash
+oc auth can-i create namespaces
+oc auth can-i create clusterrolebindings
+oc auth can-i create pods --as=system:serviceaccount:openshift-must-gather-xxx:must-gather-collector
+```
+
+### Pod Not Starting
+
+Check if the node has enough resources and can pull the must-gather image:
+```bash
+oc get pods -n openshift-must-gather-xxx
+oc describe pod -n openshift-must-gather-xxx
+```
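+
+To follow the collection progress, tail the logs of the gather container (named `gather`, or `gather-1`, `gather-2`, ... when multiple images are used). The pod and namespace names below are placeholders for the generated ones:
+```bash
+oc logs -f must-gather-xxxxxx -n openshift-must-gather-xxxxxx -c gather
+```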
+
+### Timeout Issues
+
+For large clusters or audit log collection, increase the timeout:
+```json
+{
+ "timeout": "2h"
+}
+```
+
+### Image Pull Errors
+
+Ensure the must-gather image is accessible:
+```bash
+oc get secret -n openshift-config pull-secret
+```
+
+## Security Considerations
+
+### Privileged Access
+
+The must-gather pods run with:
+- `cluster-admin` ClusterRoleBinding
+- `system-cluster-critical` priority class
+- Tolerations for all taints
+- Optional host network access
+
+### Temporary Resources
+
+By default, the generated plan advises cleaning up all created resources (namespace, service account, cluster role binding) once the must-gather collection is complete. Use `"keep_resources": "true"` to retain them for debugging.
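+
+If the plan created a temporary namespace, manual cleanup can be as simple as deleting that namespace together with the generated cluster role binding (the names below are placeholders for the generated ones):
+
+```bash
+oc delete namespace openshift-must-gather-xxxxxx
+oc delete clusterrolebinding openshift-must-gather-xxxxxx-must-gather-collector
+```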
+
+### Image Sources
+
+The prompt uses these default images:
+- **Must-gather**: `registry.redhat.io/openshift4/ose-must-gather:latest`
+- **Wait container**: `registry.redhat.io/ubi9/ubi-minimal`
+
+Custom images should be from trusted sources.
diff --git a/pkg/kubernetes/fakeclient/fakeclient.go b/pkg/kubernetes/fakeclient/fakeclient.go
new file mode 100644
index 000000000..0d10577c2
--- /dev/null
+++ b/pkg/kubernetes/fakeclient/fakeclient.go
@@ -0,0 +1,193 @@
+package fakeclient
+
+import (
+ "context"
+
+ "github.com/containers/kubernetes-mcp-server/pkg/api"
+ authv1 "k8s.io/api/authorization/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/discovery"
+ "k8s.io/client-go/dynamic"
+ fakedynamic "k8s.io/client-go/dynamic/fake"
+ "k8s.io/client-go/kubernetes"
+ authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/restmapper"
+ "k8s.io/client-go/tools/clientcmd"
+ clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+ metricsv1beta1 "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1"
+)
+
+// FakeKubernetesClient implements api.KubernetesClient for testing.
+// It embeds kubernetes.Interface to satisfy the interface and provides fake
+// implementations only for the commonly used methods; calling an unimplemented
+// method panics because the embedded interface is nil.
+// SelfSubjectAccessReview creation is faked so that CanIUse-style permission checks can be exercised.
+type FakeKubernetesClient struct {
+	kubernetes.Interface // embedded nil interface; unimplemented methods panic if called
+ DynClient dynamic.Interface
+ DiscClient *FakeDiscoveryClient
+ Mapper *ResettableRESTMapper
+ KnownAccessor map[string]bool
+}
+
+// ResettableRESTMapper wraps a RESTMapper and adds a no-op Reset method.
+type ResettableRESTMapper struct {
+ meta.RESTMapper
+}
+
+func (r *ResettableRESTMapper) Reset() {}
+
+// FakeDiscoveryClient implements discovery.CachedDiscoveryInterface
+type FakeDiscoveryClient struct {
+ discovery.CachedDiscoveryInterface
+ APIResourceLists []*metav1.APIResourceList
+}
+
+func (f *FakeDiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) {
+ for _, rl := range f.APIResourceLists {
+ if rl.GroupVersion == groupVersion {
+ return rl, nil
+ }
+ }
+ return &metav1.APIResourceList{GroupVersion: groupVersion}, nil
+}
+
+func (f *FakeDiscoveryClient) Invalidate() {}
+func (f *FakeDiscoveryClient) Fresh() bool { return true }
+
+// FakeAuthorizationV1Client implements authorizationv1.AuthorizationV1Interface
+type FakeAuthorizationV1Client struct {
+ authorizationv1.AuthorizationV1Interface
+ KnownAccessor map[string]bool
+}
+
+func (f *FakeAuthorizationV1Client) SelfSubjectAccessReviews() authorizationv1.SelfSubjectAccessReviewInterface {
+ return &FakeSelfSubjectAccessReviews{KnownAccessor: f.KnownAccessor}
+}
+
+// FakeSelfSubjectAccessReviews implements authorizationv1.SelfSubjectAccessReviewInterface.
+// As this is a fake client, Create returns allowed: true by default;
+// to deny access to specific resources, configure the client with WithDeniedAccess.
+type FakeSelfSubjectAccessReviews struct {
+ authorizationv1.SelfSubjectAccessReviewInterface
+ KnownAccessor map[string]bool
+}
+
+func (f *FakeSelfSubjectAccessReviews) Create(ctx context.Context, review *authv1.SelfSubjectAccessReview, opts metav1.CreateOptions) (*authv1.SelfSubjectAccessReview, error) {
+ // allow ALL by default
+ review.Status.Allowed = true
+
+ ra := review.Spec.ResourceAttributes
+
+ // Check keys in order of specificity: exact match first, then more general
+ // "verb:group:resource:namespace:name" format
+ keysToCheck := []string{
+ // exact match
+ ra.Verb + ":" + ra.Group + ":" + ra.Resource + ":" + ra.Namespace + ":" + ra.Name,
+ // any name in namespace
+ ra.Verb + ":" + ra.Group + ":" + ra.Resource + ":" + ra.Namespace + ":",
+ // specific name, any namespace
+ ra.Verb + ":" + ra.Group + ":" + ra.Resource + "::" + ra.Name,
+ // any namespace, any name
+ ra.Verb + ":" + ra.Group + ":" + ra.Resource + "::",
+ }
+
+ for _, key := range keysToCheck {
+ if allowed, ok := f.KnownAccessor[key]; ok {
+ review.Status.Allowed = allowed
+ return review, nil
+ }
+ }
+
+ return review, nil
+}
+
+// Option is a functional option for configuring FakeKubernetesClient
+type Option func(*FakeKubernetesClient)
+
+// NewFakeKubernetesClient creates a fake kubernetes client for testing
+func NewFakeKubernetesClient(opts ...Option) *FakeKubernetesClient {
+ scheme := runtime.NewScheme()
+ _ = corev1.AddToScheme(scheme)
+
+ apiResourcesList := make([]*metav1.APIResourceList, 0)
+ apiGroupResources := make([]*restmapper.APIGroupResources, 0)
+
+ client := &FakeKubernetesClient{
+ DynClient: fakedynamic.NewSimpleDynamicClient(scheme),
+ DiscClient: &FakeDiscoveryClient{APIResourceLists: apiResourcesList},
+ Mapper: &ResettableRESTMapper{RESTMapper: restmapper.NewDiscoveryRESTMapper(apiGroupResources)},
+ KnownAccessor: make(map[string]bool),
+ }
+
+ for _, opt := range opts {
+ opt(client)
+ }
+ return client
+}
+
+// WithDeniedAccess marks the given verb/group/resource combination as denied (allowed: false).
+// With empty namespace and name, the denial applies across all namespaces and names;
+// set them to scope the denial to a specific namespace or object.
+func WithDeniedAccess(verb, group, resource, namespace, name string) Option {
+ return func(c *FakeKubernetesClient) {
+ key := verb + ":" + group + ":" + resource + ":" + namespace + ":" + name
+ c.KnownAccessor[key] = false
+ }
+}
+
+func (f *FakeKubernetesClient) NamespaceOrDefault(namespace string) string {
+ if namespace != "" {
+ return namespace
+ }
+
+ return "default"
+}
+
+func (f *FakeKubernetesClient) RESTConfig() *rest.Config {
+ return &rest.Config{Host: "https://fake-server:6443"}
+}
+
+func (f *FakeKubernetesClient) RESTMapper() meta.ResettableRESTMapper {
+ return f.Mapper
+}
+
+func (f *FakeKubernetesClient) DiscoveryClient() discovery.CachedDiscoveryInterface {
+ return f.DiscClient
+}
+
+func (f *FakeKubernetesClient) DynamicClient() dynamic.Interface {
+ return f.DynClient
+}
+
+func (f *FakeKubernetesClient) MetricsV1beta1Client() *metricsv1beta1.MetricsV1beta1Client {
+ return nil
+}
+
+// Override AuthorizationV1 to return our fake
+func (f *FakeKubernetesClient) AuthorizationV1() authorizationv1.AuthorizationV1Interface {
+ return &FakeAuthorizationV1Client{KnownAccessor: f.KnownAccessor}
+}
+
+// Implement genericclioptions.RESTClientGetter interface
+
+func (f *FakeKubernetesClient) ToRESTConfig() (*rest.Config, error) {
+ return f.RESTConfig(), nil
+}
+
+func (f *FakeKubernetesClient) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
+ return f.DiscClient, nil
+}
+
+func (f *FakeKubernetesClient) ToRESTMapper() (meta.RESTMapper, error) {
+ return f.Mapper, nil
+}
+
+func (f *FakeKubernetesClient) ToRawKubeConfigLoader() clientcmd.ClientConfig {
+ return clientcmd.NewDefaultClientConfig(*clientcmdapi.NewConfig(), nil)
+}
+
+// Verify interface compliance
+var _ api.KubernetesClient = (*FakeKubernetesClient)(nil)
diff --git a/pkg/kubernetes/resources.go b/pkg/kubernetes/resources.go
index 54f2e9ca7..97130643e 100644
--- a/pkg/kubernetes/resources.go
+++ b/pkg/kubernetes/resources.go
@@ -34,7 +34,7 @@ func (c *Core) ResourcesList(ctx context.Context, gvk *schema.GroupVersionKind,
// Check if operation is allowed for all namespaces (applicable for namespaced resources)
isNamespaced, _ := c.isNamespaced(gvk)
- if isNamespaced && !c.canIUse(ctx, gvr, namespace, "list") && namespace == "" {
+ if isNamespaced && !c.CanIUse(ctx, gvr, namespace, "list") && namespace == "" {
namespace = c.NamespaceOrDefault("")
}
if options.AsTable {
@@ -227,7 +227,9 @@ func (c *Core) supportsGroupVersion(groupVersion string) bool {
return true
}
-func (c *Core) canIUse(ctx context.Context, gvr *schema.GroupVersionResource, namespace, verb string) bool {
+// CanIUse checks if the current user has permission to perform a specific verb on a resource.
+// It uses SelfSubjectAccessReview to determine if the operation is allowed.
+func (c *Core) CanIUse(ctx context.Context, gvr *schema.GroupVersionResource, namespace, verb string) bool {
accessReviews := c.AuthorizationV1().SelfSubjectAccessReviews()
response, err := accessReviews.Create(ctx, &authv1.SelfSubjectAccessReview{
Spec: authv1.SelfSubjectAccessReviewSpec{ResourceAttributes: &authv1.ResourceAttributes{
diff --git a/pkg/mcp/modules.go b/pkg/mcp/modules.go
index 0cbcefbdd..b0204a6d5 100644
--- a/pkg/mcp/modules.go
+++ b/pkg/mcp/modules.go
@@ -9,4 +9,5 @@ import (
_ "github.com/containers/kubernetes-mcp-server/pkg/toolsets/kubevirt"
_ "github.com/containers/kubernetes-mcp-server/pkg/toolsets/netedge"
_ "github.com/containers/kubernetes-mcp-server/pkg/toolsets/observability"
+ _ "github.com/containers/kubernetes-mcp-server/pkg/toolsets/openshift"
)
diff --git a/pkg/mcp/testdata/toolsets-openshift-prompts.json b/pkg/mcp/testdata/toolsets-openshift-prompts.json
new file mode 100644
index 000000000..0d7162dcc
--- /dev/null
+++ b/pkg/mcp/testdata/toolsets-openshift-prompts.json
@@ -0,0 +1,52 @@
+[
+ {
+ "name": "plan_mustgather",
+    "description": "Plan for collecting a must-gather archive from an OpenShift cluster. Must-gather is a tool for collecting cluster data related to debugging and troubleshooting like logs, Kubernetes resources, etc.",
+ "arguments": [
+ {
+ "name": "node_name",
+ "description": "Specific node name to run must-gather pod on"
+ },
+ {
+ "name": "node_selector",
+ "description": "Node selector in key=value,key2=value2 format to filter nodes for the pod"
+ },
+ {
+ "name": "source_dir",
+ "description": "Custom gather directory inside pod (default: /must-gather)"
+ },
+ {
+ "name": "namespace",
+ "description": "Privileged namespace to use for must-gather (auto-generated if not specified)"
+ },
+ {
+ "name": "gather_command",
+      "description": "Custom gather command e.g. /usr/bin/gather_audit_logs (default: /usr/bin/gather)"
+ },
+ {
+ "name": "timeout",
+      "description": "Timeout duration for gather command (e.g. 30m, 1h)"
+ },
+ {
+ "name": "since",
+      "description": "Only gather data newer than this duration (e.g. 5s, 2m5s, or 3h6m10s); defaults to all data."
+ },
+ {
+ "name": "host_network",
+ "description": "Use host network for must-gather pod (true/false)"
+ },
+ {
+ "name": "keep_resources",
+ "description": "Keep pod resources after collection (true/false, default: false)"
+ },
+ {
+ "name": "all_component_images",
+ "description": "Include must-gather images from all installed operators (true/false)"
+ },
+ {
+ "name": "images",
+ "description": "Comma-separated list of custom must-gather container images"
+ }
+ ]
+ }
+]
diff --git a/pkg/mcp/testdata/toolsets-openshift-tools.json b/pkg/mcp/testdata/toolsets-openshift-tools.json
new file mode 100644
index 000000000..fe51488c7
--- /dev/null
+++ b/pkg/mcp/testdata/toolsets-openshift-tools.json
@@ -0,0 +1 @@
+[]
diff --git a/pkg/mcp/toolsets_test.go b/pkg/mcp/toolsets_test.go
index 14ba85c70..46ce64651 100644
--- a/pkg/mcp/toolsets_test.go
+++ b/pkg/mcp/toolsets_test.go
@@ -18,6 +18,7 @@ import (
"github.com/containers/kubernetes-mcp-server/pkg/toolsets/helm"
"github.com/containers/kubernetes-mcp-server/pkg/toolsets/kiali"
"github.com/containers/kubernetes-mcp-server/pkg/toolsets/kubevirt"
+ "github.com/containers/kubernetes-mcp-server/pkg/toolsets/openshift"
"github.com/mark3labs/mcp-go/mcp"
"github.com/stretchr/testify/suite"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
@@ -179,6 +180,42 @@ func (s *ToolsetsSuite) TestGranularToolsetsTools() {
}
}
+func (s *ToolsetsSuite) TestOpenShiftToolset() {
+ s.Run("OpenShift toolset in OpenShift cluster", func() {
+ s.Handle(test.NewInOpenShiftHandler())
+ toolsets.Clear()
+ toolsets.Register(&openshift.Toolset{})
+ s.Cfg.Toolsets = []string{"openshift"}
+ s.InitMcpClient()
+ tools, err := s.ListTools(s.T().Context(), mcp.ListToolsRequest{})
+ s.Run("ListTools returns tools", func() {
+ s.NotNil(tools, "Expected tools from ListTools")
+ s.NoError(err, "Expected no error from ListTools")
+ })
+ s.Run("ListTools returns correct Tool metadata", func() {
+ s.assertJsonSnapshot("toolsets-openshift-tools.json", tools.Tools)
+ })
+ })
+}
+
+func (s *ToolsetsSuite) TestOpenShiftToolsetPrompts() {
+ s.Run("OpenShift toolset prompts in OpenShift cluster", func() {
+ s.Handle(test.NewInOpenShiftHandler())
+ toolsets.Clear()
+ toolsets.Register(&openshift.Toolset{})
+ s.Cfg.Toolsets = []string{"openshift"}
+ s.InitMcpClient()
+ prompts, err := s.ListPrompts(s.T().Context(), mcp.ListPromptsRequest{})
+ s.Run("ListPrompts returns prompts", func() {
+ s.NotNil(prompts, "Expected prompts from ListPrompts")
+ s.NoError(err, "Expected no error from ListPrompts")
+ })
+ s.Run("ListPrompts returns correct Prompt metadata", func() {
+ s.assertJsonSnapshot("toolsets-openshift-prompts.json", prompts.Prompts)
+ })
+ })
+}
+
func (s *ToolsetsSuite) TestInputSchemaEdgeCases() {
//https://github.com/containers/kubernetes-mcp-server/issues/340
s.Run("InputSchema for no-arg tool is object with empty properties", func() {
diff --git a/pkg/ocp/mustgather/plan_mustgather.go b/pkg/ocp/mustgather/plan_mustgather.go
new file mode 100644
index 000000000..9ebe7cdfb
--- /dev/null
+++ b/pkg/ocp/mustgather/plan_mustgather.go
@@ -0,0 +1,417 @@
+package mustgather
+
+import (
+ "context"
+ "fmt"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/containers/kubernetes-mcp-server/pkg/api"
+ "github.com/containers/kubernetes-mcp-server/pkg/kubernetes"
+ corev1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/rand"
+ "k8s.io/client-go/dynamic"
+ "sigs.k8s.io/yaml"
+)
+
+const (
+ defaultGatherSourceDir = "/must-gather/"
+ defaultMustGatherImage = "registry.redhat.io/openshift4/ose-must-gather:latest"
+ defaultGatherCmd = "/usr/bin/gather"
+ mgAnnotation = "operators.openshift.io/must-gather-image"
+ maxConcurrentGathers = 8
+)
+
+// PlanMustGatherParams contains the parameters for planning a must-gather collection.
+type PlanMustGatherParams struct {
+ NodeName string
+ NodeSelector map[string]string
+ HostNetwork bool
+ SourceDir string // custom gather directory inside pod, default is "/must-gather"
+ Namespace string
+ KeepResources bool
+ GatherCommand string // custom gather command, default is "/usr/bin/gather"
+ AllImages bool // whether to use custom gather images from installed operators on cluster
+ Images []string // custom list of must-gather images
+ Timeout string
+ Since string
+}
+
+// PlanMustGather generates a must-gather plan with YAML manifests for creating the required resources.
+// It returns the plan as a string containing YAML manifests and instructions.
+func PlanMustGather(ctx context.Context, k api.KubernetesClient, params PlanMustGatherParams) (string, error) {
+ dynamicClient := k.DynamicClient()
+ k8sCore := kubernetes.NewCore(k)
+
+ sourceDir := params.SourceDir
+ if sourceDir == "" {
+ sourceDir = defaultGatherSourceDir
+ } else {
+ sourceDir = path.Clean(sourceDir)
+ }
+
+ namespace := params.Namespace
+ if namespace == "" {
+ namespace = fmt.Sprintf("openshift-must-gather-%s", rand.String(6))
+ }
+
+ gatherCmd := params.GatherCommand
+ if gatherCmd == "" {
+ gatherCmd = defaultGatherCmd
+ }
+
+ images := params.Images
+ if params.AllImages {
+ componentImages, err := getComponentImages(ctx, dynamicClient)
+ if err != nil {
+ return "", fmt.Errorf("failed to get operator images: %v", err)
+ }
+ images = append(images, componentImages...)
+ }
+
+ if len(images) > maxConcurrentGathers {
+ return "", fmt.Errorf("more than %d gather images are not supported", maxConcurrentGathers)
+ }
+
+ timeout := params.Timeout
+ if timeout != "" {
+ _, err := time.ParseDuration(timeout)
+ if err != nil {
+			return "", fmt.Errorf("timeout duration is not valid: %w", err)
+ }
+ gatherCmd = fmt.Sprintf("/usr/bin/timeout %s %s", timeout, gatherCmd)
+ }
+
+ since := params.Since
+ if since != "" {
+ _, err := time.ParseDuration(since)
+ if err != nil {
+			return "", fmt.Errorf("since duration is not valid: %w", err)
+ }
+ }
+
+ envVars := []corev1.EnvVar{}
+ if since != "" {
+ envVars = append(envVars, corev1.EnvVar{
+ Name: "MUST_GATHER_SINCE",
+ Value: since,
+ })
+ }
+
+	// Template container for gathering;
+	// if multiple images are provided, multiple gather containers will be spun up in the same pod.
+ gatherContainerTemplate := corev1.Container{
+ Name: "gather",
+ Image: defaultMustGatherImage,
+ ImagePullPolicy: corev1.PullIfNotPresent,
+ Command: []string{gatherCmd},
+ Env: envVars,
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "must-gather-output",
+ MountPath: sourceDir,
+ },
+ },
+ }
+
+ var gatherContainers = []corev1.Container{
+ *gatherContainerTemplate.DeepCopy(),
+ }
+
+ if len(images) > 0 {
+ gatherContainers = make([]corev1.Container, len(images))
+ }
+
+ for i, image := range images {
+ gatherContainers[i] = *gatherContainerTemplate.DeepCopy()
+
+		// If more than one gather container is added,
+		// suffix each container name with a numeric index.
+ if len(images) > 1 {
+ gatherContainers[i].Name = fmt.Sprintf("gather-%d", i+1)
+ }
+ gatherContainers[i].Image = image
+ }
+
+ serviceAccountName := "must-gather-collector"
+
+ pod := &corev1.Pod{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "v1",
+ Kind: "Pod",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ // Avoiding generateName as resources_create_or_update fails without explicit name.
+ Name: fmt.Sprintf("must-gather-%s", rand.String(6)),
+ Namespace: namespace,
+ },
+ Spec: corev1.PodSpec{
+ ServiceAccountName: serviceAccountName,
+ NodeName: params.NodeName,
+ PriorityClassName: "system-cluster-critical",
+ RestartPolicy: corev1.RestartPolicyNever,
+ Volumes: []corev1.Volume{
+ {
+ Name: "must-gather-output",
+ VolumeSource: corev1.VolumeSource{
+ EmptyDir: &corev1.EmptyDirVolumeSource{},
+ },
+ },
+ },
+ Containers: append(gatherContainers, corev1.Container{
+ Name: "wait",
+ Image: "registry.redhat.io/ubi9/ubi-minimal",
+ ImagePullPolicy: corev1.PullIfNotPresent,
+ Command: []string{"/bin/bash", "-c", "sleep infinity"},
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "must-gather-output",
+ MountPath: "/must-gather",
+ },
+ },
+ }),
+ HostNetwork: params.HostNetwork,
+ NodeSelector: params.NodeSelector,
+ Tolerations: []corev1.Toleration{
+ {
+ Operator: "Exists",
+ },
+ },
+ },
+ }
+
+ namespaceExists := false
+ _, err := k8sCore.ResourcesGet(ctx, &schema.GroupVersionKind{
+ Group: "",
+ Version: "v1",
+ Kind: "Namespace",
+ }, "", namespace)
+ if err == nil {
+ namespaceExists = true
+ }
+
+ var namespaceObj *corev1.Namespace
+ if !namespaceExists {
+ namespaceObj = &corev1.Namespace{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "v1",
+ Kind: "Namespace",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: namespace,
+ },
+ }
+ }
+
+ serviceAccount := &corev1.ServiceAccount{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "v1",
+ Kind: "ServiceAccount",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: serviceAccountName,
+ Namespace: namespace,
+ },
+ }
+
+ clusterRoleBindingName := fmt.Sprintf("%s-must-gather-collector", namespace)
+ clusterRoleBinding := &rbacv1.ClusterRoleBinding{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "rbac.authorization.k8s.io/v1",
+ Kind: "ClusterRoleBinding",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: clusterRoleBindingName,
+ },
+ RoleRef: rbacv1.RoleRef{
+ APIGroup: "rbac.authorization.k8s.io",
+ Kind: "ClusterRole",
+ Name: "cluster-admin",
+ },
+ Subjects: []rbacv1.Subject{
+ {
+ Kind: "ServiceAccount",
+ Name: serviceAccountName,
+ Namespace: namespace,
+ },
+ },
+ }
+
+ allowChecks := map[string]struct {
+ schema.GroupVersionResource
+ name string
+ verb string
+ }{
+ "create_namespace": {
+ GroupVersionResource: schema.GroupVersionResource{Version: "v1", Resource: "namespaces"},
+ verb: "create",
+ },
+ "create_serviceaccount": {
+ GroupVersionResource: schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"},
+ verb: "create",
+ },
+ "create_clusterrolebinding": {
+ GroupVersionResource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterrolebindings"},
+ verb: "create",
+ },
+ "create_pod": {
+ GroupVersionResource: schema.GroupVersionResource{Version: "v1", Resource: "pods"},
+ verb: "create",
+ },
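+		// Creating a hostNetwork pod additionally requires permission to use a host-network SCC;
+		// this check backs the hostNetwork-specific warning emitted below.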
+ "use_scc_hostnetwork": {
+ GroupVersionResource: schema.GroupVersionResource{Group: "security.openshift.io", Version: "v1", Resource: "securitycontextconstraints"},
+ name: "hostnetwork-v2",
+ verb: "use",
+ },
+ }
+ isAllowed := make(map[string]bool)
+
+ for key, check := range allowChecks {
+ isAllowed[key] = k8sCore.CanIUse(ctx, &check.GroupVersionResource, "", check.verb)
+ }
+
+ var result strings.Builder
+ result.WriteString("Plan contains YAML manifests for must-gather pods and required resources (namespace, serviceaccount, clusterrolebinding). " +
+ "Suggest how the user can apply the manifest and copy results locally (`oc cp` / `kubectl cp`). \n\n",
+ )
+ result.WriteString("Ask the user if they want to apply the plan \n" +
+		"- use the resources_create_or_update tool to apply the manifest \n" +
+ "- alternatively, advise the user to execute `oc apply` / `kubectl apply` instead. \n\n",
+ )
+
+ if !params.KeepResources {
+ result.WriteString("Once the must-gather collection is completed, the user may wish to cleanup the created resources. \n" +
+ "- use the resources_delete tool to delete the namespace and the clusterrolebinding \n" +
+ "- or, execute cleanup using `kubectl delete`. \n\n")
+ }
+
+ if !namespaceExists && isAllowed["create_namespace"] {
+ namespaceYaml, err := yaml.Marshal(namespaceObj)
+ if err != nil {
+ return "", fmt.Errorf("failed to marshal namespace to yaml: %w", err)
+ }
+
+ result.WriteString("```yaml\n")
+ result.Write(namespaceYaml)
+ result.WriteString("```\n\n")
+ }
+
+ if !namespaceExists && !isAllowed["create_namespace"] {
+ result.WriteString("WARNING: The resources_create_or_update call does not have permission to create namespace(s).\n")
+ }
+
+	// Each manifest is dumped into its own ``` code block because the
+	// resources_create_or_update tool call fails when the content contains more than one resource;
+	// some models are smart enough to detect the error and retry with one resource at a time, though.
+
+ serviceAccountYaml, err := yaml.Marshal(serviceAccount)
+ if err != nil {
+ return "", fmt.Errorf("failed to marshal service account to yaml: %w", err)
+ }
+ result.WriteString("```yaml\n")
+ result.Write(serviceAccountYaml)
+ result.WriteString("```\n\n")
+
+ if !isAllowed["create_serviceaccount"] {
+ result.WriteString("WARNING: The resources_create_or_update call does not have permission to create serviceaccount(s).\n")
+ }
+
+ clusterRoleBindingYaml, err := yaml.Marshal(clusterRoleBinding)
+ if err != nil {
+ return "", fmt.Errorf("failed to marshal cluster role binding to yaml: %w", err)
+ }
+
+ result.WriteString("```yaml\n")
+ result.Write(clusterRoleBindingYaml)
+ result.WriteString("```\n\n")
+
+ if !isAllowed["create_clusterrolebinding"] {
+ result.WriteString("WARNING: The resources_create_or_update call does not have permission to create clusterrolebinding(s).\n")
+ }
+
+ podYaml, err := yaml.Marshal(pod)
+ if err != nil {
+ return "", fmt.Errorf("failed to marshal pod to yaml: %w", err)
+ }
+
+ result.WriteString("```yaml\n")
+ result.Write(podYaml)
+ result.WriteString("```\n")
+
+ if !isAllowed["create_pod"] {
+ result.WriteString("WARNING: The resources_create_or_update call does not have permission to create pod(s).\n")
+ }
+
+ if params.HostNetwork && !isAllowed["use_scc_hostnetwork"] {
+ result.WriteString("WARNING: The resources_create_or_update call does not have permission to create pod(s) with hostNetwork: true.\n")
+ }
+
+ return result.String(), nil
+}
+
+func getComponentImages(ctx context.Context, dynamicClient dynamic.Interface) ([]string, error) {
+ var images []string
+
+ appendImageFromAnnotation := func(obj runtime.Object) error {
+ unstruct, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
+ if err != nil {
+ return err
+ }
+
+ u := unstructured.Unstructured{Object: unstruct}
+ annotations := u.GetAnnotations()
+ if annotations[mgAnnotation] != "" {
+ images = append(images, annotations[mgAnnotation])
+ }
+
+ return nil
+ }
+
+ // List ClusterOperators
+ clusterOperatorGVR := schema.GroupVersionResource{
+ Group: "config.openshift.io",
+ Version: "v1",
+ Resource: "clusteroperators",
+ }
+ clusterOperatorsList, err := dynamicClient.Resource(clusterOperatorGVR).List(ctx, metav1.ListOptions{})
+ if err != nil {
+ return nil, err
+ }
+
+ if err := clusterOperatorsList.EachListItem(appendImageFromAnnotation); err != nil {
+ return images, err
+ }
+
+ // List ClusterServiceVersions
+ csvGVR := schema.GroupVersionResource{
+ Group: "operators.coreos.com",
+ Version: "v1alpha1",
+ Resource: "clusterserviceversions",
+ }
+ csvList, err := dynamicClient.Resource(csvGVR).List(ctx, metav1.ListOptions{})
+ if err != nil {
+ return images, err
+ }
+
+ err = csvList.EachListItem(appendImageFromAnnotation)
+ return images, err
+}
+
+// ParseNodeSelector parses a comma-separated key=value selector string into a map.
+func ParseNodeSelector(selector string) map[string]string {
+ result := make(map[string]string)
+ pairs := strings.Split(selector, ",")
+ for _, pair := range pairs {
+ kv := strings.SplitN(strings.TrimSpace(pair), "=", 2)
+ if len(kv) == 2 && strings.TrimSpace(kv[0]) != "" {
+ result[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1])
+ }
+ }
+ return result
+}
diff --git a/pkg/ocp/mustgather/plan_mustgather_test.go b/pkg/ocp/mustgather/plan_mustgather_test.go
new file mode 100644
index 000000000..2f694f563
--- /dev/null
+++ b/pkg/ocp/mustgather/plan_mustgather_test.go
@@ -0,0 +1,282 @@
+package mustgather
+
+import (
+ "context"
+ "testing"
+
+ "github.com/containers/kubernetes-mcp-server/pkg/kubernetes/fakeclient"
+ "github.com/stretchr/testify/require"
+)
+
+func TestPlanMustGather(t *testing.T) {
+ ctx := context.Background()
+
+ tests := []struct {
+ name string
+ params PlanMustGatherParams
+ shouldContain []string
+ shouldNotContain []string
+ wantError string
+ }{
+ {
+ name: "generates plan with default values",
+ params: PlanMustGatherParams{},
+ shouldContain: []string{
+ "apiVersion: v1",
+ "kind: Pod",
+ "kind: ServiceAccount",
+ "kind: ClusterRoleBinding",
+ "must-gather-collector",
+ "image: registry.redhat.io/openshift4/ose-must-gather:latest",
+ "mountPath: /must-gather",
+ },
+ },
+ {
+ name: "generates plan with custom namespace",
+ params: PlanMustGatherParams{Namespace: "custom-must-gather-ns"},
+ shouldContain: []string{"namespace: custom-must-gather-ns"},
+ },
+ {
+ name: "generates plan with node name",
+ params: PlanMustGatherParams{NodeName: "worker-node-1"},
+ shouldContain: []string{"nodeName: worker-node-1"},
+ },
+ {
+ name: "generates plan with host network enabled",
+ params: PlanMustGatherParams{HostNetwork: true},
+ shouldContain: []string{"hostNetwork: true"},
+ },
+ {
+ name: "generates plan with custom source dir",
+ params: PlanMustGatherParams{SourceDir: "/custom/gather/path"},
+ shouldContain: []string{"mountPath: /custom/gather/path"},
+ },
+ {
+ name: "generates plan with multiple custom images",
+ params: PlanMustGatherParams{
+ Images: []string{"quay.io/custom/must-gather-1:v1", "quay.io/custom/must-gather-2:v2"},
+ },
+ shouldContain: []string{
+ "image: quay.io/custom/must-gather-1:v1",
+ "image: quay.io/custom/must-gather-2:v2",
+ "name: gather-1",
+ "name: gather-2",
+ },
+ },
+ {
+ name: "returns error when more than eight images",
+ params: PlanMustGatherParams{
+ Images: []string{
+ "quay.io/image/1", "quay.io/image/2", "quay.io/image/3", "quay.io/image/4",
+ "quay.io/image/5", "quay.io/image/6", "quay.io/image/7", "quay.io/image/8",
+ "quay.io/image/9",
+ },
+ },
+ wantError: "more than 8 gather images are not supported",
+ },
+ {
+ name: "generates plan with valid timeout",
+ params: PlanMustGatherParams{Timeout: "30m"},
+ shouldContain: []string{"/usr/bin/timeout 30m /usr/bin/gather"},
+ },
+ {
+ name: "returns error for invalid timeout format",
+ params: PlanMustGatherParams{Timeout: "invalid-duration"},
+ wantError: "timeout duration is not valid",
+ },
+ {
+ name: "generates plan with valid since duration",
+ params: PlanMustGatherParams{Since: "1h"},
+ shouldContain: []string{"name: MUST_GATHER_SINCE", "value: 1h"},
+ },
+ {
+ name: "returns error for invalid since format",
+ params: PlanMustGatherParams{Since: "not-a-duration"},
+ wantError: "since duration is not valid",
+ },
+ {
+ name: "generates plan with custom gather command",
+ params: PlanMustGatherParams{GatherCommand: "/custom/gather/script"},
+ shouldContain: []string{"/custom/gather/script"},
+ },
+ {
+ name: "generates plan with node selector",
+ params: PlanMustGatherParams{NodeSelector: map[string]string{"node-role.kubernetes.io/worker": ""}},
+ shouldContain: []string{"nodeSelector:", "node-role.kubernetes.io/worker"},
+ },
+ {
+ name: "generates plan with cleanup instructions when keep_resources is false",
+ params: PlanMustGatherParams{KeepResources: false},
+ shouldContain: []string{"cleanup the created resources"},
+ },
+ {
+ name: "generates plan without cleanup instructions when keep_resources is true",
+ params: PlanMustGatherParams{KeepResources: true},
+ shouldNotContain: []string{"cleanup the created resources"},
+ },
+ {
+ name: "cleans source dir path",
+ params: PlanMustGatherParams{SourceDir: "/custom/path/../gather/./dir"},
+ shouldContain: []string{"mountPath: /custom/gather/dir"},
+ },
+ {
+ name: "generates plan with timeout and gather command combined",
+ params: PlanMustGatherParams{Timeout: "15m", GatherCommand: "/custom/gather"},
+ shouldContain: []string{"/usr/bin/timeout 15m /custom/gather"},
+ },
+ {
+ name: "generates plan with all parameters combined",
+ params: PlanMustGatherParams{
+ Namespace: "test-ns",
+ NodeName: "node-1",
+ HostNetwork: true,
+ SourceDir: "/gather-output",
+ Since: "2h",
+ Timeout: "45m",
+ Images: []string{"quay.io/test/gather:v1"},
+ NodeSelector: map[string]string{"kubernetes.io/os": "linux"},
+ },
+ shouldContain: []string{
+ "namespace: test-ns",
+ "nodeName: node-1",
+ "hostNetwork: true",
+ "mountPath: /gather-output",
+ "value: 2h",
+ "/usr/bin/timeout 45m",
+ "image: quay.io/test/gather:v1",
+ "kubernetes.io/os",
+ },
+ },
+ {
+ name: "handles empty string timeout",
+ params: PlanMustGatherParams{Timeout: ""},
+ shouldContain: []string{"/usr/bin/gather"},
+ shouldNotContain: []string{"/usr/bin/timeout"},
+ },
+ {
+ name: "handles empty string since",
+ params: PlanMustGatherParams{Since: ""},
+ shouldNotContain: []string{"MUST_GATHER_SINCE"},
+ },
+ {
+ name: "handles empty images slice",
+ params: PlanMustGatherParams{Images: []string{}},
+ shouldContain: []string{"image: registry.redhat.io/openshift4/ose-must-gather:latest"},
+ },
+ {
+ name: "handles nil node selector",
+ params: PlanMustGatherParams{NodeSelector: nil},
+ },
+ {
+ name: "handles empty node selector map",
+ params: PlanMustGatherParams{NodeSelector: map[string]string{}},
+ },
+ {
+ name: "includes wait container in pod spec",
+ params: PlanMustGatherParams{},
+ shouldContain: []string{"name: wait", "sleep infinity"},
+ },
+ {
+ name: "includes tolerations for all taints",
+ params: PlanMustGatherParams{},
+ shouldContain: []string{"tolerations:", "operator: Exists"},
+ },
+ {
+ name: "includes priority class",
+ params: PlanMustGatherParams{},
+ shouldContain: []string{"priorityClassName: system-cluster-critical"},
+ },
+ {
+ name: "includes restart policy never",
+ params: PlanMustGatherParams{},
+ shouldContain: []string{"restartPolicy: Never"},
+ },
+ {
+ name: "includes cluster-admin role binding",
+ params: PlanMustGatherParams{},
+ shouldContain: []string{"name: cluster-admin", "kind: ClusterRole"},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ client := fakeclient.NewFakeKubernetesClient()
+
+ result, err := PlanMustGather(ctx, client, tt.params)
+
+ if tt.wantError != "" {
+ require.Error(t, err)
+ require.Contains(t, err.Error(), tt.wantError)
+ require.Empty(t, result)
+ return
+ }
+
+ require.NoError(t, err)
+ require.NotEmpty(t, result)
+
+ for _, want := range tt.shouldContain {
+ require.Contains(t, result, want)
+ }
+ for _, notWant := range tt.shouldNotContain {
+ require.NotContains(t, result, notWant)
+ }
+ })
+ }
+
+ sarcTests := []struct {
+ name string
+ permissions []fakeclient.Option
+ params PlanMustGatherParams
+ shouldContain string
+ }{
+ {
+ name: "includes warning when no namespace create permission",
+ permissions: []fakeclient.Option{
+ fakeclient.WithDeniedAccess("create", "", "namespaces", "", ""),
+ },
+ shouldContain: "WARNING: The resources_create_or_update call does not have permission to create namespace(s)",
+ },
+ {
+ name: "includes warning when no serviceaccount create permission",
+ permissions: []fakeclient.Option{
+ fakeclient.WithDeniedAccess("create", "", "serviceaccounts", "", ""),
+ },
+ shouldContain: "WARNING: The resources_create_or_update call does not have permission to create serviceaccount(s)",
+ },
+ {
+ name: "includes warning when no clusterrolebinding create permission",
+ permissions: []fakeclient.Option{
+ fakeclient.WithDeniedAccess("create", "rbac.authorization.k8s.io", "clusterrolebindings", "", ""),
+ },
+ shouldContain: "WARNING: The resources_create_or_update call does not have permission to create clusterrolebinding(s)",
+ },
+ {
+ name: "includes warning when no pod create permission",
+ permissions: []fakeclient.Option{
+ fakeclient.WithDeniedAccess("create", "", "pods", "", ""),
+ },
+ shouldContain: "WARNING: The resources_create_or_update call does not have permission to create pod(s)",
+ },
+ {
+ name: "includes warning when hostNetwork enabled without SCC permission",
+ permissions: []fakeclient.Option{
+ fakeclient.WithDeniedAccess("use", "security.openshift.io", "securitycontextconstraints", "", ""),
+ },
+ shouldContain: "WARNING: The resources_create_or_update call does not have permission to create pod(s) with hostNetwork: true",
+ params: PlanMustGatherParams{
+ HostNetwork: true,
+ },
+ },
+ }
+
+ for _, tt := range sarcTests {
+ t.Run(tt.name, func(t *testing.T) {
+ client := fakeclient.NewFakeKubernetesClient(tt.permissions...)
+
+ result, err := PlanMustGather(ctx, client, tt.params)
+
+ require.NoError(t, err)
+ require.NotEmpty(t, result)
+ require.Contains(t, result, tt.shouldContain)
+ })
+ }
+}
diff --git a/pkg/toolsets/openshift/mustgather/mustgather.go b/pkg/toolsets/openshift/mustgather/mustgather.go
new file mode 100644
index 000000000..7a721a2fa
--- /dev/null
+++ b/pkg/toolsets/openshift/mustgather/mustgather.go
@@ -0,0 +1,156 @@
+package mustgather
+
+import (
+ "strings"
+
+ "github.com/containers/kubernetes-mcp-server/pkg/api"
+ "github.com/containers/kubernetes-mcp-server/pkg/ocp/mustgather"
+)
+
+// Prompts returns the ServerPrompt definitions for must-gather operations.
+func Prompts() []api.ServerPrompt {
+ return []api.ServerPrompt{{
+ Prompt: api.Prompt{
+ Name: "plan_mustgather",
+ Title: "Plan a must-gather collection",
+			Description: "Plan for collecting a must-gather archive from an OpenShift cluster. Must-gather is a tool for collecting cluster data related to debugging and troubleshooting like logs, Kubernetes resources, etc.",
+ Arguments: []api.PromptArgument{
+ {
+ Name: "node_name",
+ Description: "Specific node name to run must-gather pod on",
+ Required: false,
+ },
+ {
+ Name: "node_selector",
+ Description: "Node selector in key=value,key2=value2 format to filter nodes for the pod",
+ Required: false,
+ },
+ {
+ Name: "source_dir",
+ Description: "Custom gather directory inside pod (default: /must-gather)",
+ Required: false,
+ },
+ {
+ Name: "namespace",
+ Description: "Privileged namespace to use for must-gather (auto-generated if not specified)",
+ Required: false,
+ },
+ {
+ Name: "gather_command",
+					Description: "Custom gather command e.g. /usr/bin/gather_audit_logs (default: /usr/bin/gather)",
+ Required: false,
+ },
+ {
+ Name: "timeout",
+					Description: "Timeout duration for gather command (e.g. 30m, 1h)",
+ Required: false,
+ },
+ {
+ Name: "since",
+					Description: "Only gather data newer than this duration (e.g. 5s, 2m5s, or 3h6m10s); defaults to all data.",
+ Required: false,
+ },
+ {
+ Name: "host_network",
+ Description: "Use host network for must-gather pod (true/false)",
+ Required: false,
+ },
+ {
+ Name: "keep_resources",
+ Description: "Keep pod resources after collection (true/false, default: false)",
+ Required: false,
+ },
+ {
+ Name: "all_component_images",
+ Description: "Include must-gather images from all installed operators (true/false)",
+ Required: false,
+ },
+ {
+ Name: "images",
+ Description: "Comma-separated list of custom must-gather container images",
+ Required: false,
+ },
+ },
+ },
+ Handler: planMustGatherHandler,
+ }}
+}
+
+// planMustGatherHandler is the handler that parses arguments and calls the core
+// PlanMustGather function.
+func planMustGatherHandler(params api.PromptHandlerParams) (*api.PromptCallResult, error) {
+ args := params.GetArguments()
+
+ mgParams := mustgather.PlanMustGatherParams{
+ NodeName: args["node_name"],
+ NodeSelector: mustgather.ParseNodeSelector(args["node_selector"]),
+ SourceDir: args["source_dir"],
+ Namespace: args["namespace"],
+ GatherCommand: args["gather_command"],
+ Timeout: args["timeout"],
+ Since: args["since"],
+ HostNetwork: parseBool(args["host_network"]),
+ KeepResources: parseBool(args["keep_resources"]),
+ AllImages: parseBool(args["all_component_images"]),
+ Images: parseImages(args["images"]),
+ }
+
+ // params embeds api.KubernetesClient
+ result, err := mustgather.PlanMustGather(params.Context, params, mgParams)
+ if err != nil {
+ return nil, err
+ }
+
+ return api.NewPromptCallResult(
+ "Must-gather plan generated successfully",
+ []api.PromptMessage{
+ {
+ Role: "user",
+ Content: api.PromptContent{
+ Type: "text",
+ Text: formatMustGatherPrompt(result),
+ },
+ },
+ {
+ Role: "assistant",
+ Content: api.PromptContent{
+ Type: "text",
+ Text: "I'll help you apply this must-gather plan to collect diagnostic data from your OpenShift cluster.",
+ },
+ },
+ },
+ nil,
+ ), nil
+}
+
+// parseBool parses a string value to a boolean; it returns true only for "true" (case-insensitive) and false otherwise, including for empty values.
+func parseBool(value string) bool {
+ return strings.ToLower(strings.TrimSpace(value)) == "true"
+}
+
+// parseImages parses a comma-separated list of images into a slice.
+func parseImages(value string) []string {
+ if value == "" {
+ return nil
+ }
+ var images []string
+ for _, img := range strings.Split(value, ",") {
+ img = strings.TrimSpace(img)
+ if img != "" {
+ images = append(images, img)
+ }
+ }
+ return images
+}
+
+// formatMustGatherPrompt formats the must-gather plan result into a prompt for the LLM.
+func formatMustGatherPrompt(planResult string) string {
+ var sb strings.Builder
+
+ sb.WriteString("# Must-Gather Collection Plan\n\n")
+ sb.WriteString(planResult)
+ sb.WriteString("\n---\n\n")
+ sb.WriteString("**Please review the plan above and confirm if you want to proceed with applying these resources.**\n")
+
+ return sb.String()
+}
diff --git a/pkg/toolsets/openshift/toolset.go b/pkg/toolsets/openshift/toolset.go
new file mode 100644
index 000000000..a3eb145b5
--- /dev/null
+++ b/pkg/toolsets/openshift/toolset.go
@@ -0,0 +1,35 @@
+package openshift
+
+import (
+ "slices"
+
+ "github.com/containers/kubernetes-mcp-server/pkg/api"
+ "github.com/containers/kubernetes-mcp-server/pkg/toolsets"
+ "github.com/containers/kubernetes-mcp-server/pkg/toolsets/openshift/mustgather"
+)
+
+type Toolset struct{}
+
+var _ api.Toolset = (*Toolset)(nil)
+
+func (t *Toolset) GetName() string {
+ return "openshift"
+}
+
+func (t *Toolset) GetDescription() string {
+ return "OpenShift-specific tools for cluster management and troubleshooting"
+}
+
+func (t *Toolset) GetTools(o api.Openshift) []api.ServerTool {
+ return nil
+}
+
+func (t *Toolset) GetPrompts() []api.ServerPrompt {
+ return slices.Concat(
+ mustgather.Prompts(),
+ )
+}
+
+func init() {
+ toolsets.Register(&Toolset{})
+}