diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 993c2e5..dd105ca 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -87,7 +87,7 @@ jobs: - name: Set up Helm uses: azure/setup-helm@v4.2.0 with: - version: v4.1.0 + version: v4.1.1 - name: Install unittest plugin run: | diff --git a/helm/kagent-tools/templates/_helpers.tpl b/helm/kagent-tools/templates/_helpers.tpl index 7922df0..d47a01e 100644 --- a/helm/kagent-tools/templates/_helpers.tpl +++ b/helm/kagent-tools/templates/_helpers.tpl @@ -65,6 +65,13 @@ Allows overriding it for multi-namespace deployments in combined charts. {{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}} {{- end }} +{{/* +Service account name: default when useDefaultServiceAccount is true, otherwise the chart fullname. +*/}} +{{- define "kagent.serviceAccountName" -}} +{{- if .Values.useDefaultServiceAccount }}default{{- else }}{{ include "kagent.fullname" . }}{{- end }} +{{- end }} + {{/* Watch namespaces - transforms list of namespaces cached by the controller into comma-separated string Removes duplicates diff --git a/helm/kagent-tools/templates/clusterrole.yaml b/helm/kagent-tools/templates/clusterrole.yaml index cde9f45..cbd50bd 100644 --- a/helm/kagent-tools/templates/clusterrole.yaml +++ b/helm/kagent-tools/templates/clusterrole.yaml @@ -1,3 +1,4 @@ +{{- if not .Values.useDefaultServiceAccount }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -26,4 +27,5 @@ rules: verbs: - get - list - - watch \ No newline at end of file + - watch +{{- end }} \ No newline at end of file diff --git a/helm/kagent-tools/templates/clusterrolebinding.yaml b/helm/kagent-tools/templates/clusterrolebinding.yaml index ee7d67e..bbe51eb 100644 --- a/helm/kagent-tools/templates/clusterrolebinding.yaml +++ b/helm/kagent-tools/templates/clusterrolebinding.yaml @@ -1,3 +1,4 @@ +{{- if not .Values.useDefaultServiceAccount }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: @@ -41,4 +42,5 @@ roleRef: subjects: - kind: ServiceAccount name: {{ include "kagent.fullname" . }} - namespace: {{ include "kagent.namespace" . }} \ No newline at end of file + namespace: {{ include "kagent.namespace" . }} +{{- end }} \ No newline at end of file diff --git a/helm/kagent-tools/templates/deployment.yaml b/helm/kagent-tools/templates/deployment.yaml index 001caef..24d324e 100644 --- a/helm/kagent-tools/templates/deployment.yaml +++ b/helm/kagent-tools/templates/deployment.yaml @@ -51,7 +51,7 @@ spec: securityContext: {{- toYaml .Values.podSecurityContext | nindent 8 }} - serviceAccountName: {{ include "kagent.fullname" . }} + serviceAccountName: {{ include "kagent.serviceAccountName" . }} containers: - name: tools command: @@ -91,6 +91,8 @@ spec: value: {{ .Values.otel.tracing.exporter.otlp.timeout | quote }} - name: OTEL_EXPORTER_OTLP_TRACES_INSECURE value: {{ .Values.otel.tracing.exporter.otlp.insecure | quote }} + - name: TOKEN_PASSTHROUGH + value: {{ (index .Values.tools "k8s" | default dict).tokenPassthrough | default false | quote }} {{- with .Values.tools.env }} {{- toYaml . 
| nindent 12 }} {{- end }} diff --git a/helm/kagent-tools/templates/serviceaccount.yaml b/helm/kagent-tools/templates/serviceaccount.yaml index b0b4c03..3422acb 100644 --- a/helm/kagent-tools/templates/serviceaccount.yaml +++ b/helm/kagent-tools/templates/serviceaccount.yaml @@ -1,7 +1,9 @@ +{{- if not .Values.useDefaultServiceAccount }} apiVersion: v1 kind: ServiceAccount metadata: name: {{ include "kagent.fullname" . }} namespace: {{ include "kagent.namespace" . }} labels: - {{- include "kagent.labels" . | nindent 4 }} \ No newline at end of file + {{- include "kagent.labels" . | nindent 4 }} +{{- end }} diff --git a/helm/kagent-tools/tests/deployment_test.yaml b/helm/kagent-tools/tests/deployment_test.yaml index 397fd41..0e4e8ca 100644 --- a/helm/kagent-tools/tests/deployment_test.yaml +++ b/helm/kagent-tools/tests/deployment_test.yaml @@ -60,13 +60,46 @@ tests: path: spec.template.spec.containers[0].resources.limits.memory value: 512Mi - - it: should have correct service account name + - it: should use default service account when useDefaultServiceAccount is true template: deployment.yaml + set: + useDefaultServiceAccount: true + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: default + + - it: should use dedicated service account when useDefaultServiceAccount is false + template: deployment.yaml + set: + useDefaultServiceAccount: false asserts: - equal: path: spec.template.spec.serviceAccountName value: RELEASE-NAME + - it: should set token passthrough env when tools.k8s.tokenPassthrough is true + template: deployment.yaml + set: + tools.k8s.tokenPassthrough: true + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: TOKEN_PASSTHROUGH + value: "true" + + - it: should set token passthrough env when tools.k8s.tokenPassthrough is false + template: deployment.yaml + set: + tools.k8s.tokenPassthrough: false + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: TOKEN_PASSTHROUGH + value: "false" + - it: should have correct container port template: deployment.yaml asserts: diff --git a/helm/kagent-tools/values.yaml b/helm/kagent-tools/values.yaml index 556f56e..dbe150e 100644 --- a/helm/kagent-tools/values.yaml +++ b/helm/kagent-tools/values.yaml @@ -1,6 +1,10 @@ # Default values for kagent replicaCount: 1 +# When true: pods use the default service account and no ClusterRole/ClusterRoleBinding are created. +# When false: a dedicated ServiceAccount and RBAC are created. +useDefaultServiceAccount: false + global: tag: "" @@ -27,6 +31,10 @@ tools: limits: cpu: 1000m memory: 512Mi + k8s: + # When true: a Bearer token in the Authorization header on each request is passed to kubectl; fails if missing + # When false: kubectl uses in-cluster ServiceAccount. + tokenPassthrough: false prometheus: url: "prometheus.kagent.svc.cluster.local:9090" username: "" diff --git a/internal/cmd/cmd.go b/internal/cmd/cmd.go index 3061006..0fa2741 100644 --- a/internal/cmd/cmd.go +++ b/internal/cmd/cmd.go @@ -20,10 +20,11 @@ type DefaultShellExecutor struct{} func (e *DefaultShellExecutor) Exec(ctx context.Context, command string, args ...string) ([]byte, error) { log := logger.WithContext(ctx) startTime := time.Now() + redactedArgs := logger.RedactArgsForLog(args) log.Info("executing command", "command", command, - "args", args, + "args", redactedArgs, ) cmd := exec.CommandContext(ctx, command, args...) @@ -34,7 +35,7 @@ func (e *DefaultShellExecutor) Exec(ctx context.Context, command string, args .. 
if err != nil { log.Error("command execution failed", "command", command, - "args", args, + "args", redactedArgs, "error", err, "output", string(output), "duration", duration.Seconds(), @@ -42,7 +43,7 @@ func (e *DefaultShellExecutor) Exec(ctx context.Context, command string, args .. } else { log.Info("command execution successful", "command", command, - "args", args, + "args", redactedArgs, "duration", duration.Seconds(), ) } diff --git a/internal/commands/builder.go b/internal/commands/builder.go index 3bced94..c9baed8 100644 --- a/internal/commands/builder.go +++ b/internal/commands/builder.go @@ -29,6 +29,7 @@ type CommandBuilder struct { namespace string context string kubeconfig string + token string output string labels map[string]string annotations map[string]string @@ -120,6 +121,14 @@ func (cb *CommandBuilder) WithKubeconfig(kubeconfig string) *CommandBuilder { return cb } +// WithToken sets the authentication token for kubectl commands +func (cb *CommandBuilder) WithToken(token string) *CommandBuilder { + if token != "" { + cb.token = token + } + return cb +} + // WithOutput sets the output format func (cb *CommandBuilder) WithOutput(output string) *CommandBuilder { validOutputs := []string{"json", "yaml", "wide", "name", "custom-columns", "custom-columns-file", "go-template", "go-template-file", "jsonpath", "jsonpath-file"} @@ -240,6 +249,11 @@ func (cb *CommandBuilder) Build() (string, []string, error) { args = append(args, "--kubeconfig", cb.kubeconfig) } + // Add token if specified + if cb.token != "" { + args = append(args, "--token", cb.token) + } + // Add output format if cb.output != "" { args = append(args, "--output", cb.output) @@ -293,7 +307,7 @@ func (cb *CommandBuilder) Execute(ctx context.Context) (string, error) { log := logger.WithContext(ctx) _, span := telemetry.StartSpan(ctx, "commands.execute", attribute.String("command", cb.command), - attribute.StringSlice("args", cb.args), + attribute.StringSlice("args", logger.RedactArgsForLog(cb.args)), attribute.Bool("cached", cb.cached), ) defer span.End() @@ -308,14 +322,15 @@ func (cb *CommandBuilder) Execute(ctx context.Context) (string, error) { return "", err } + redactedArgs := logger.RedactArgsForLog(args) span.SetAttributes( attribute.String("built_command", command), - attribute.StringSlice("built_args", args), + attribute.StringSlice("built_args", redactedArgs), ) log.Debug("executing command", "command", command, - "args", args, + "args", redactedArgs, "cached", cb.cached, ) @@ -343,9 +358,10 @@ func (cb *CommandBuilder) Execute(ctx context.Context) (string, error) { func (cb *CommandBuilder) executeWithCache(ctx context.Context, command string, args []string) (string, error) { log := logger.WithContext(ctx) + redactedArgs := logger.RedactArgsForLog(args) _, span := telemetry.StartSpan(ctx, "commands.executeWithCache", attribute.String("command", command), - attribute.StringSlice("args", args), + attribute.StringSlice("args", redactedArgs), attribute.Bool("cached", true), ) defer span.End() @@ -357,7 +373,7 @@ func (cb *CommandBuilder) executeWithCache(ctx context.Context, command string, log.Info("executing cached command", "command", command, - "args", args, + "args", redactedArgs, "cache_key", cacheKey, "cache_ttl", cb.cacheTTL.String(), ) @@ -374,7 +390,7 @@ func (cb *CommandBuilder) executeWithCache(ctx context.Context, command string, telemetry.AddEvent(span, "cache.miss.executing_command") log.Debug("cache miss, executing command", "command", command, - "args", args, + "args", redactedArgs, ) return 
cb.executeCommand(ctx, command, args) }) @@ -383,7 +399,7 @@ func (cb *CommandBuilder) executeWithCache(ctx context.Context, command string, telemetry.RecordError(span, err, "Cached command execution failed") log.Error("cached command execution failed", "command", command, - "args", args, + "args", redactedArgs, "cache_key", cacheKey, "error", err, ) @@ -393,7 +409,7 @@ func (cb *CommandBuilder) executeWithCache(ctx context.Context, command string, telemetry.RecordSuccess(span, "Cached command executed successfully") log.Info("cached command execution successful", "command", command, - "args", args, + "args", redactedArgs, "cache_key", cacheKey, "result_length", len(result), ) diff --git a/internal/logger/logger.go b/internal/logger/logger.go index b9a078f..0569689 100644 --- a/internal/logger/logger.go +++ b/internal/logger/logger.go @@ -59,19 +59,37 @@ func WithContext(ctx context.Context) *slog.Logger { return logger } +// RedactArgsForLog returns a copy of args with sensitive values redacted for logging. +// Any value immediately following "--token" is replaced with "" so tokens are not logged. +func RedactArgsForLog(args []string) []string { + if len(args) == 0 { + return nil + } + out := make([]string, len(args)) + copy(out, args) + for i := 0; i < len(out)-1; i++ { + if out[i] == "--token" { + out[i+1] = "" + i++ // skip the redacted value + } + } + return out +} + func LogExecCommand(ctx context.Context, logger *slog.Logger, command string, args []string, caller string) { logger.Info("executing command", "command", command, - "args", args, + "args", RedactArgsForLog(args), "caller", caller, ) } func LogExecCommandResult(ctx context.Context, logger *slog.Logger, command string, args []string, output string, err error, duration float64, caller string) { + redacted := RedactArgsForLog(args) if err != nil { logger.Error("command execution failed", "command", command, - "args", args, + "args", redacted, "error", err.Error(), "duration_seconds", duration, "caller", caller, @@ -79,7 +97,7 @@ func LogExecCommandResult(ctx context.Context, logger *slog.Logger, command stri } else { logger.Info("command execution successful", "command", command, - "args", args, + "args", redacted, "output", output, "duration_seconds", duration, "caller", caller, diff --git a/internal/logger/logger_test.go b/internal/logger/logger_test.go index efca71e..f6befc5 100644 --- a/internal/logger/logger_test.go +++ b/internal/logger/logger_test.go @@ -12,6 +12,43 @@ import ( "go.opentelemetry.io/otel/trace/noop" ) +func TestRedactArgsForLog(t *testing.T) { + t.Run("redacts token value", func(t *testing.T) { + args := []string{"get", "pods", "--token", "secret-token-123", "-n", "default"} + redacted := RedactArgsForLog(args) + require.Len(t, redacted, 6) + assert.Equal(t, "get", redacted[0]) + assert.Equal(t, "pods", redacted[1]) + assert.Equal(t, "--token", redacted[2]) + assert.Equal(t, "", redacted[3]) + assert.Equal(t, "-n", redacted[4]) + assert.Equal(t, "default", redacted[5]) + }) + t.Run("empty args returns nil", func(t *testing.T) { + assert.Nil(t, RedactArgsForLog(nil)) + assert.Nil(t, RedactArgsForLog([]string{})) + }) + t.Run("args without token unchanged", func(t *testing.T) { + args := []string{"get", "pods", "-n", "default"} + redacted := RedactArgsForLog(args) + assert.Equal(t, args, redacted) + }) + t.Run("--token at end with no value", func(t *testing.T) { + args := []string{"get", "pods", "--token"} + redacted := RedactArgsForLog(args) + assert.Equal(t, args, redacted) + }) + t.Run("logged output 
does not contain token", func(t *testing.T) { + var buf bytes.Buffer + log := slog.New(slog.NewTextHandler(&buf, nil)) + args := []string{"get", "pods", "--token", "secret-token-123"} + log.Info("executing command", "command", "kubectl", "args", RedactArgsForLog(args)) + output := buf.String() + assert.Contains(t, output, "") + assert.NotContains(t, output, "secret-token-123") + }) +} + func TestLogExecCommand(t *testing.T) { var buf bytes.Buffer logger := slog.New(slog.NewTextHandler(&buf, nil)) diff --git a/pkg/k8s/k8s.go b/pkg/k8s/k8s.go index f9184d1..22a8bad 100644 --- a/pkg/k8s/k8s.go +++ b/pkg/k8s/k8s.go @@ -6,6 +6,7 @@ import ( "fmt" "maps" "math/rand" + "net/http" "os" "slices" "strings" @@ -24,21 +25,22 @@ import ( // K8sTool struct to hold the LLM model type K8sTool struct { - kubeconfig string - llmModel llms.Model + kubeconfig string + llmModel llms.Model + tokenPassthrough bool // when true, require Bearer token and pass it to kubectl; when false, do not use token } func NewK8sTool(llmModel llms.Model) *K8sTool { - return &K8sTool{llmModel: llmModel} + return &K8sTool{llmModel: llmModel, tokenPassthrough: os.Getenv("TOKEN_PASSTHROUGH") == "true"} } func NewK8sToolWithConfig(kubeconfig string, llmModel llms.Model) *K8sTool { - return &K8sTool{kubeconfig: kubeconfig, llmModel: llmModel} + return &K8sTool{kubeconfig: kubeconfig, llmModel: llmModel, tokenPassthrough: os.Getenv("TOKEN_PASSTHROUGH") == "true"} } // runKubectlCommandWithCacheInvalidation runs a kubectl command and invalidates cache if it's a modification operation -func (k *K8sTool) runKubectlCommandWithCacheInvalidation(ctx context.Context, args ...string) (*mcp.CallToolResult, error) { - result, err := k.runKubectlCommand(ctx, args...) +func (k *K8sTool) runKubectlCommandWithCacheInvalidation(ctx context.Context, headers http.Header, args ...string) (*mcp.CallToolResult, error) { + result, err := k.runKubectlCommand(ctx, headers, args...) // If command succeeded and it's a modification command, invalidate cache if err == nil && len(args) > 0 { @@ -82,7 +84,7 @@ func (k *K8sTool) handleKubectlGetEnhanced(ctx context.Context, request mcp.Call args = append(args, "-o", "json") } - return k.runKubectlCommand(ctx, args...) + return k.runKubectlCommand(ctx, request.Header, args...) } // Get pod logs @@ -106,7 +108,7 @@ func (k *K8sTool) handleKubectlLogsEnhanced(ctx context.Context, request mcp.Cal args = append(args, "--tail", fmt.Sprintf("%d", tailLines)) } - return k.runKubectlCommand(ctx, args...) + return k.runKubectlCommand(ctx, request.Header, args...) } // Scale deployment @@ -121,7 +123,7 @@ func (k *K8sTool) handleScaleDeployment(ctx context.Context, request mcp.CallToo args := []string{"scale", "deployment", deploymentName, "--replicas", fmt.Sprintf("%d", replicas), "-n", namespace} - return k.runKubectlCommandWithCacheInvalidation(ctx, args...) + return k.runKubectlCommandWithCacheInvalidation(ctx, request.Header, args...) } // Patch resource @@ -152,7 +154,7 @@ func (k *K8sTool) handlePatchResource(ctx context.Context, request mcp.CallToolR args := []string{"patch", resourceType, resourceName, "-p", patch, "-n", namespace} - return k.runKubectlCommandWithCacheInvalidation(ctx, args...) + return k.runKubectlCommandWithCacheInvalidation(ctx, request.Header, args...) 
} // Apply manifest from content @@ -197,7 +199,7 @@ func (k *K8sTool) handleApplyManifest(ctx context.Context, request mcp.CallToolR return mcp.NewToolResultError(fmt.Sprintf("Failed to close temp file: %v", err)), nil } - return k.runKubectlCommandWithCacheInvalidation(ctx, "apply", "-f", tmpFile.Name()) + return k.runKubectlCommandWithCacheInvalidation(ctx, request.Header, "apply", "-f", tmpFile.Name()) } // Delete resource @@ -212,7 +214,7 @@ func (k *K8sTool) handleDeleteResource(ctx context.Context, request mcp.CallTool args := []string{"delete", resourceType, resourceName, "-n", namespace} - return k.runKubectlCommandWithCacheInvalidation(ctx, args...) + return k.runKubectlCommandWithCacheInvalidation(ctx, request.Header, args...) } // Check service connectivity @@ -227,23 +229,23 @@ func (k *K8sTool) handleCheckServiceConnectivity(ctx context.Context, request mc // Create a temporary curl pod for connectivity check podName := fmt.Sprintf("curl-test-%d", rand.Intn(10000)) defer func() { - _, _ = k.runKubectlCommand(ctx, "delete", "pod", podName, "-n", namespace, "--ignore-not-found") + _, _ = k.runKubectlCommand(ctx, request.Header, "delete", "pod", podName, "-n", namespace, "--ignore-not-found") }() // Create the curl pod - _, err := k.runKubectlCommand(ctx, "run", podName, "--image=curlimages/curl", "-n", namespace, "--restart=Never", "--", "sleep", "3600") + _, err := k.runKubectlCommand(ctx, request.Header, "run", podName, "--image=curlimages/curl", "-n", namespace, "--restart=Never", "--", "sleep", "3600") if err != nil { return mcp.NewToolResultError(fmt.Sprintf("Failed to create curl pod: %v", err)), nil } // Wait for pod to be ready - _, err = k.runKubectlCommandWithTimeout(ctx, 60*time.Second, "wait", "--for=condition=ready", "pod/"+podName, "-n", namespace) + _, err = k.runKubectlCommandWithTimeout(ctx, request.Header, 60*time.Second, "wait", "--for=condition=ready", "pod/"+podName, "-n", namespace) if err != nil { return mcp.NewToolResultError(fmt.Sprintf("Failed to wait for curl pod: %v", err)), nil } // Execute kubectl command - return k.runKubectlCommand(ctx, "exec", podName, "-n", namespace, "--", "curl", "-s", serviceName) + return k.runKubectlCommand(ctx, request.Header, "exec", podName, "-n", namespace, "--", "curl", "-s", serviceName) } // Get cluster events @@ -257,7 +259,7 @@ func (k *K8sTool) handleGetEvents(ctx context.Context, request mcp.CallToolReque args = append(args, "--all-namespaces") } - return k.runKubectlCommand(ctx, args...) + return k.runKubectlCommand(ctx, request.Header, args...) } // Execute command in pod @@ -287,12 +289,12 @@ func (k *K8sTool) handleExecCommand(ctx context.Context, request mcp.CallToolReq args := []string{"exec", podName, "-n", namespace, "--", command} - return k.runKubectlCommand(ctx, args...) + return k.runKubectlCommand(ctx, request.Header, args...) } // Get available API resources func (k *K8sTool) handleGetAvailableAPIResources(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return k.runKubectlCommand(ctx, "api-resources") + return k.runKubectlCommand(ctx, request.Header, "api-resources") } // Kubectl describe tool @@ -310,7 +312,7 @@ func (k *K8sTool) handleKubectlDescribeTool(ctx context.Context, request mcp.Cal args = append(args, "-n", namespace) } - return k.runKubectlCommand(ctx, args...) + return k.runKubectlCommand(ctx, request.Header, args...) 
} // Rollout operations @@ -329,12 +331,12 @@ func (k *K8sTool) handleRollout(ctx context.Context, request mcp.CallToolRequest args = append(args, "-n", namespace) } - return k.runKubectlCommand(ctx, args...) + return k.runKubectlCommand(ctx, request.Header, args...) } // Get cluster configuration func (k *K8sTool) handleGetClusterConfiguration(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return k.runKubectlCommand(ctx, "config", "view", "-o", "json") + return k.runKubectlCommand(ctx, request.Header, "config", "view", "-o", "json") } // Remove annotation @@ -353,7 +355,7 @@ func (k *K8sTool) handleRemoveAnnotation(ctx context.Context, request mcp.CallTo args = append(args, "-n", namespace) } - return k.runKubectlCommand(ctx, args...) + return k.runKubectlCommand(ctx, request.Header, args...) } // Remove label @@ -372,7 +374,7 @@ func (k *K8sTool) handleRemoveLabel(ctx context.Context, request mcp.CallToolReq args = append(args, "-n", namespace) } - return k.runKubectlCommand(ctx, args...) + return k.runKubectlCommand(ctx, request.Header, args...) } // Annotate resource @@ -393,7 +395,7 @@ func (k *K8sTool) handleAnnotateResource(ctx context.Context, request mcp.CallTo args = append(args, "-n", namespace) } - return k.runKubectlCommand(ctx, args...) + return k.runKubectlCommand(ctx, request.Header, args...) } // Label resource @@ -414,7 +416,7 @@ func (k *K8sTool) handleLabelResource(ctx context.Context, request mcp.CallToolR args = append(args, "-n", namespace) } - return k.runKubectlCommand(ctx, args...) + return k.runKubectlCommand(ctx, request.Header, args...) } // Create resource from URL @@ -431,7 +433,7 @@ func (k *K8sTool) handleCreateResourceFromURL(ctx context.Context, request mcp.C args = append(args, "-n", namespace) } - return k.runKubectlCommand(ctx, args...) + return k.runKubectlCommand(ctx, request.Header, args...) } // Resource generation embeddings @@ -528,32 +530,64 @@ func (k *K8sTool) handleGenerateResource(ctx context.Context, request mcp.CallTo return mcp.NewToolResultText(responseText), nil } +// extractBearerToken extracts the Bearer token from the Authorization header +func extractBearerToken(headers http.Header) string { + if auth := headers.Get("Authorization"); auth != "" { + if strings.HasPrefix(auth, "Bearer ") { + return strings.TrimPrefix(auth, "Bearer ") + } + } + return "" +} + +// tokenForKubectl returns the token to pass to kubectl and an error if passthrough is true but token is missing. +func (k *K8sTool) tokenForKubectl(headers http.Header) (string, error) { + token := extractBearerToken(headers) + if k.tokenPassthrough && token == "" { + return "", fmt.Errorf("Bearer token required when TOKEN_PASSTHROUGH is true") + } + if k.tokenPassthrough { + return token, nil + } + return "", nil // do not use token when passthrough is false +} + // runKubectlCommand is a helper function to execute kubectl commands -func (k *K8sTool) runKubectlCommand(ctx context.Context, args ...string) (*mcp.CallToolResult, error) { - output, err := commands.NewCommandBuilder("kubectl"). +func (k *K8sTool) runKubectlCommand(ctx context.Context, headers http.Header, args ...string) (*mcp.CallToolResult, error) { + token, err := k.tokenForKubectl(headers) + if err != nil { + return mcp.NewToolResultError(err.Error()), nil + } + builder := commands.NewCommandBuilder("kubectl"). WithArgs(args...). - WithKubeconfig(k.kubeconfig). 
- Execute(ctx) - + WithKubeconfig(k.kubeconfig) + if token != "" { + builder = builder.WithToken(token) + } + output, err := builder.Execute(ctx) if err != nil { return mcp.NewToolResultError(err.Error()), nil } - return mcp.NewToolResultText(output), nil } // runKubectlCommandWithTimeout is a helper function to execute kubectl commands with a timeout -func (k *K8sTool) runKubectlCommandWithTimeout(ctx context.Context, timeout time.Duration, args ...string) (*mcp.CallToolResult, error) { - output, err := commands.NewCommandBuilder("kubectl"). +func (k *K8sTool) runKubectlCommandWithTimeout(ctx context.Context, headers http.Header, timeout time.Duration, args ...string) (*mcp.CallToolResult, error) { + token, err := k.tokenForKubectl(headers) + if err != nil { + return mcp.NewToolResultError(err.Error()), nil + } + builder := commands.NewCommandBuilder("kubectl"). WithArgs(args...). WithKubeconfig(k.kubeconfig). - WithTimeout(timeout). - Execute(ctx) - + WithTimeout(timeout) + if token != "" { + builder = builder.WithToken(token) + } + output, err := builder.Execute(ctx) if err != nil { return mcp.NewToolResultError(err.Error()), nil } - return mcp.NewToolResultText(output), nil } @@ -611,7 +645,7 @@ func RegisterTools(s *server.MCPServer, llm llms.Model, kubeconfig string, readO args = append(args, "-n", namespace) } - result, err := k8sTool.runKubectlCommand(ctx, args...) + result, err := k8sTool.runKubectlCommand(ctx, request.Header, args...) if err != nil { return mcp.NewToolResultError(fmt.Sprintf("Get YAML command failed: %v", err)), nil } @@ -737,7 +771,7 @@ func RegisterTools(s *server.MCPServer, llm llms.Model, kubeconfig string, readO } tmpFile.Close() - result, err := k8sTool.runKubectlCommand(ctx, "create", "-f", tmpFile.Name()) + result, err := k8sTool.runKubectlCommand(ctx, request.Header, "create", "-f", tmpFile.Name()) if err != nil { return mcp.NewToolResultError(fmt.Sprintf("Create command failed: %v", err)), nil } diff --git a/pkg/k8s/k8s_test.go b/pkg/k8s/k8s_test.go index e373066..8ac0340 100644 --- a/pkg/k8s/k8s_test.go +++ b/pkg/k8s/k8s_test.go @@ -2,6 +2,7 @@ package k8s import ( "context" + "net/http" "testing" "github.com/kagent-dev/tools/internal/cmd" @@ -16,6 +17,13 @@ func newTestK8sTool() *K8sTool { return NewK8sTool(nil) } +// newTestK8sToolWithPassthrough creates a K8sTool with token passthrough set for testing. 
+func newTestK8sToolWithPassthrough(passthrough bool) *K8sTool { + t := NewK8sTool(nil) + t.tokenPassthrough = passthrough + return t +} + // Helper function to create a test K8sTool with mock LLM func newTestK8sToolWithLLM(llm llms.Model) *K8sTool { return NewK8sTool(llm) @@ -32,6 +40,21 @@ func getResultText(result *mcp.CallToolResult) string { return "" } +// Helper function to create an http.Header with Bearer token authorization +func headerWithBearerToken(token string) http.Header { + h := http.Header{} + h.Set("Authorization", "Bearer "+token) + return h +} + +// Helper function to create a CallToolRequest with Bearer token +func requestWithBearerToken(token string, args map[string]interface{}) mcp.CallToolRequest { + req := mcp.CallToolRequest{} + req.Header = headerWithBearerToken(token) + req.Params.Arguments = args + return req +} + func TestHandleGetAvailableAPIResources(t *testing.T) { ctx := context.Background() @@ -1063,3 +1086,452 @@ users: assert.Contains(t, resultText, "clusters") }) } + +// Tests for Bearer token passing to kubectl commands +func TestBearerTokenPassthrough(t *testing.T) { + ctx := context.Background() + + t.Run("get resources with bearer token", func(t *testing.T) { + mock := cmd.NewMockShellExecutor() + expectedOutput := `NAME READY STATUS RESTARTS AGE` + mock.AddCommandString("kubectl", []string{"get", "pods", "-o", "wide", "--token", "test-token-123"}, expectedOutput, nil) + ctx := cmd.WithShellExecutor(ctx, mock) + + k8sTool := newTestK8sToolWithPassthrough(true) + req := requestWithBearerToken("test-token-123", map[string]interface{}{"resource_type": "pods"}) + result, err := k8sTool.handleKubectlGetEnhanced(ctx, req) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.False(t, result.IsError) + + // Verify the command was executed with the token + callLog := mock.GetCallLog() + require.Len(t, callLog, 1) + assert.Equal(t, "kubectl", callLog[0].Command) + assert.Contains(t, callLog[0].Args, "--token") + assert.Contains(t, callLog[0].Args, "test-token-123") + }) + + t.Run("scale deployment with bearer token", func(t *testing.T) { + mock := cmd.NewMockShellExecutor() + expectedOutput := `deployment.apps/test-deployment scaled` + mock.AddCommandString("kubectl", []string{"scale", "deployment", "test-deployment", "--replicas", "5", "-n", "default", "--token", "my-auth-token"}, expectedOutput, nil) + ctx := cmd.WithShellExecutor(ctx, mock) + + k8sTool := newTestK8sToolWithPassthrough(true) + req := requestWithBearerToken("my-auth-token", map[string]interface{}{ + "name": "test-deployment", + "replicas": float64(5), + }) + + result, err := k8sTool.handleScaleDeployment(ctx, req) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.False(t, result.IsError) + + // Verify the command was executed with the token + callLog := mock.GetCallLog() + require.Len(t, callLog, 1) + assert.Contains(t, callLog[0].Args, "--token") + assert.Contains(t, callLog[0].Args, "my-auth-token") + }) + + t.Run("get pod logs with bearer token", func(t *testing.T) { + mock := cmd.NewMockShellExecutor() + expectedOutput := `log line 1 +log line 2` + mock.AddCommandString("kubectl", []string{"logs", "test-pod", "-n", "default", "--tail", "50", "--token", "logs-token"}, expectedOutput, nil) + ctx := cmd.WithShellExecutor(ctx, mock) + + k8sTool := newTestK8sToolWithPassthrough(true) + req := requestWithBearerToken("logs-token", map[string]interface{}{"pod_name": "test-pod"}) + result, err := k8sTool.handleKubectlLogsEnhanced(ctx, req) + assert.NoError(t, err) + 
assert.NotNil(t, result) + assert.False(t, result.IsError) + + callLog := mock.GetCallLog() + require.Len(t, callLog, 1) + assert.Contains(t, callLog[0].Args, "--token") + assert.Contains(t, callLog[0].Args, "logs-token") + }) + + t.Run("delete resource with bearer token", func(t *testing.T) { + mock := cmd.NewMockShellExecutor() + expectedOutput := `deployment.apps/test-deployment deleted` + mock.AddCommandString("kubectl", []string{"delete", "deployment", "test-deployment", "-n", "default", "--token", "delete-token"}, expectedOutput, nil) + ctx := cmd.WithShellExecutor(ctx, mock) + + k8sTool := newTestK8sToolWithPassthrough(true) + req := requestWithBearerToken("delete-token", map[string]interface{}{ + "resource_type": "deployment", + "resource_name": "test-deployment", + }) + + result, err := k8sTool.handleDeleteResource(ctx, req) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.False(t, result.IsError) + + callLog := mock.GetCallLog() + require.Len(t, callLog, 1) + assert.Contains(t, callLog[0].Args, "--token") + assert.Contains(t, callLog[0].Args, "delete-token") + }) + + t.Run("patch resource with bearer token", func(t *testing.T) { + mock := cmd.NewMockShellExecutor() + expectedOutput := `deployment.apps/test-deployment patched` + mock.AddCommandString("kubectl", []string{"patch", "deployment", "test-deployment", "-p", `{"spec":{"replicas":5}}`, "-n", "default", "--token", "patch-token"}, expectedOutput, nil) + ctx := cmd.WithShellExecutor(ctx, mock) + + k8sTool := newTestK8sToolWithPassthrough(true) + req := requestWithBearerToken("patch-token", map[string]interface{}{ + "resource_type": "deployment", + "resource_name": "test-deployment", + "patch": `{"spec":{"replicas":5}}`, + }) + + result, err := k8sTool.handlePatchResource(ctx, req) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.False(t, result.IsError) + + callLog := mock.GetCallLog() + require.Len(t, callLog, 1) + assert.Contains(t, callLog[0].Args, "--token") + assert.Contains(t, callLog[0].Args, "patch-token") + }) + + t.Run("describe resource with bearer token", func(t *testing.T) { + mock := cmd.NewMockShellExecutor() + expectedOutput := `Name: test-deployment` + mock.AddCommandString("kubectl", []string{"describe", "deployment", "test-deployment", "-n", "default", "--token", "describe-token"}, expectedOutput, nil) + ctx := cmd.WithShellExecutor(ctx, mock) + + k8sTool := newTestK8sToolWithPassthrough(true) + req := requestWithBearerToken("describe-token", map[string]interface{}{ + "resource_type": "deployment", + "resource_name": "test-deployment", + "namespace": "default", + }) + + result, err := k8sTool.handleKubectlDescribeTool(ctx, req) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.False(t, result.IsError) + + callLog := mock.GetCallLog() + require.Len(t, callLog, 1) + assert.Contains(t, callLog[0].Args, "--token") + assert.Contains(t, callLog[0].Args, "describe-token") + }) + + t.Run("rollout with bearer token", func(t *testing.T) { + mock := cmd.NewMockShellExecutor() + expectedOutput := `deployment.apps/myapp restarted` + mock.AddCommandString("kubectl", []string{"rollout", "restart", "deployment/myapp", "-n", "default", "--token", "rollout-token"}, expectedOutput, nil) + ctx := cmd.WithShellExecutor(ctx, mock) + + k8sTool := newTestK8sToolWithPassthrough(true) + req := requestWithBearerToken("rollout-token", map[string]interface{}{ + "action": "restart", + "resource_type": "deployment", + "resource_name": "myapp", + "namespace": "default", + }) + + result, err := 
k8sTool.handleRollout(ctx, req) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.False(t, result.IsError) + + callLog := mock.GetCallLog() + require.Len(t, callLog, 1) + assert.Contains(t, callLog[0].Args, "--token") + assert.Contains(t, callLog[0].Args, "rollout-token") + }) + + t.Run("get events with bearer token", func(t *testing.T) { + mock := cmd.NewMockShellExecutor() + expectedOutput := `{"items": []}` + mock.AddCommandString("kubectl", []string{"get", "events", "-o", "json", "--all-namespaces", "--token", "events-token"}, expectedOutput, nil) + ctx := cmd.WithShellExecutor(ctx, mock) + + k8sTool := newTestK8sToolWithPassthrough(true) + req := requestWithBearerToken("events-token", nil) + result, err := k8sTool.handleGetEvents(ctx, req) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.False(t, result.IsError) + + callLog := mock.GetCallLog() + require.Len(t, callLog, 1) + assert.Contains(t, callLog[0].Args, "--token") + assert.Contains(t, callLog[0].Args, "events-token") + }) + + t.Run("exec command with bearer token", func(t *testing.T) { + mock := cmd.NewMockShellExecutor() + expectedOutput := `total 8` + mock.AddCommandString("kubectl", []string{"exec", "mypod", "-n", "default", "--", "ls -la", "--token", "exec-token"}, expectedOutput, nil) + ctx := cmd.WithShellExecutor(ctx, mock) + + k8sTool := newTestK8sToolWithPassthrough(true) + req := requestWithBearerToken("exec-token", map[string]interface{}{ + "pod_name": "mypod", + "namespace": "default", + "command": "ls -la", + }) + + result, err := k8sTool.handleExecCommand(ctx, req) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.False(t, result.IsError) + + callLog := mock.GetCallLog() + require.Len(t, callLog, 1) + assert.Contains(t, callLog[0].Args, "--token") + assert.Contains(t, callLog[0].Args, "exec-token") + }) + + t.Run("annotate resource with bearer token", func(t *testing.T) { + mock := cmd.NewMockShellExecutor() + expectedOutput := `deployment.apps/test-deployment annotated` + mock.AddCommandString("kubectl", []string{"annotate", "deployment", "test-deployment", "key1=value1", "--token", "annotate-token"}, expectedOutput, nil) + ctx := cmd.WithShellExecutor(ctx, mock) + + k8sTool := newTestK8sToolWithPassthrough(true) + req := requestWithBearerToken("annotate-token", map[string]interface{}{ + "resource_type": "deployment", + "resource_name": "test-deployment", + "annotations": "key1=value1", + }) + + result, err := k8sTool.handleAnnotateResource(ctx, req) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.False(t, result.IsError) + + callLog := mock.GetCallLog() + require.Len(t, callLog, 1) + assert.Contains(t, callLog[0].Args, "--token") + assert.Contains(t, callLog[0].Args, "annotate-token") + }) + + t.Run("label resource with bearer token", func(t *testing.T) { + mock := cmd.NewMockShellExecutor() + expectedOutput := `deployment.apps/test-deployment labeled` + mock.AddCommandString("kubectl", []string{"label", "deployment", "test-deployment", "env=prod", "--token", "label-token"}, expectedOutput, nil) + ctx := cmd.WithShellExecutor(ctx, mock) + + k8sTool := newTestK8sToolWithPassthrough(true) + req := requestWithBearerToken("label-token", map[string]interface{}{ + "resource_type": "deployment", + "resource_name": "test-deployment", + "labels": "env=prod", + }) + + result, err := k8sTool.handleLabelResource(ctx, req) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.False(t, result.IsError) + + callLog := mock.GetCallLog() + require.Len(t, callLog, 1) + 
assert.Contains(t, callLog[0].Args, "--token") + assert.Contains(t, callLog[0].Args, "label-token") + }) + + t.Run("api resources with bearer token", func(t *testing.T) { + mock := cmd.NewMockShellExecutor() + expectedOutput := `NAME SHORTNAMES APIVERSION NAMESPACED KIND` + mock.AddCommandString("kubectl", []string{"api-resources", "--token", "api-token"}, expectedOutput, nil) + ctx := cmd.WithShellExecutor(ctx, mock) + + k8sTool := newTestK8sToolWithPassthrough(true) + req := requestWithBearerToken("api-token", nil) + result, err := k8sTool.handleGetAvailableAPIResources(ctx, req) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.False(t, result.IsError) + + callLog := mock.GetCallLog() + require.Len(t, callLog, 1) + assert.Contains(t, callLog[0].Args, "--token") + assert.Contains(t, callLog[0].Args, "api-token") + }) + + t.Run("cluster configuration with bearer token", func(t *testing.T) { + mock := cmd.NewMockShellExecutor() + expectedOutput := `{"current-context": "default"}` + mock.AddCommandString("kubectl", []string{"config", "view", "-o", "json", "--token", "config-token"}, expectedOutput, nil) + ctx := cmd.WithShellExecutor(ctx, mock) + + k8sTool := newTestK8sToolWithPassthrough(true) + req := requestWithBearerToken("config-token", nil) + result, err := k8sTool.handleGetClusterConfiguration(ctx, req) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.False(t, result.IsError) + + callLog := mock.GetCallLog() + require.Len(t, callLog, 1) + assert.Contains(t, callLog[0].Args, "--token") + assert.Contains(t, callLog[0].Args, "config-token") + }) + + t.Run("remove annotation with bearer token", func(t *testing.T) { + mock := cmd.NewMockShellExecutor() + expectedOutput := `deployment.apps/test-deployment annotated` + mock.AddCommandString("kubectl", []string{"annotate", "deployment", "test-deployment", "key1-", "--token", "remove-anno-token"}, expectedOutput, nil) + ctx := cmd.WithShellExecutor(ctx, mock) + + k8sTool := newTestK8sToolWithPassthrough(true) + req := requestWithBearerToken("remove-anno-token", map[string]interface{}{ + "resource_type": "deployment", + "resource_name": "test-deployment", + "annotation_key": "key1", + }) + + result, err := k8sTool.handleRemoveAnnotation(ctx, req) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.False(t, result.IsError) + + callLog := mock.GetCallLog() + require.Len(t, callLog, 1) + assert.Contains(t, callLog[0].Args, "--token") + assert.Contains(t, callLog[0].Args, "remove-anno-token") + }) + + t.Run("remove label with bearer token", func(t *testing.T) { + mock := cmd.NewMockShellExecutor() + expectedOutput := `deployment.apps/test-deployment labeled` + mock.AddCommandString("kubectl", []string{"label", "deployment", "test-deployment", "env-", "--token", "remove-label-token"}, expectedOutput, nil) + ctx := cmd.WithShellExecutor(ctx, mock) + + k8sTool := newTestK8sToolWithPassthrough(true) + req := requestWithBearerToken("remove-label-token", map[string]interface{}{ + "resource_type": "deployment", + "resource_name": "test-deployment", + "label_key": "env", + }) + + result, err := k8sTool.handleRemoveLabel(ctx, req) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.False(t, result.IsError) + + callLog := mock.GetCallLog() + require.Len(t, callLog, 1) + assert.Contains(t, callLog[0].Args, "--token") + assert.Contains(t, callLog[0].Args, "remove-label-token") + }) + + t.Run("create resource from URL with bearer token", func(t *testing.T) { + mock := cmd.NewMockShellExecutor() + expectedOutput := 
`deployment.apps/test-deployment created` + mock.AddCommandString("kubectl", []string{"create", "-f", "https://example.com/manifest.yaml", "-n", "default", "--token", "url-token"}, expectedOutput, nil) + ctx := cmd.WithShellExecutor(ctx, mock) + + k8sTool := newTestK8sToolWithPassthrough(true) + req := requestWithBearerToken("url-token", map[string]interface{}{ + "url": "https://example.com/manifest.yaml", + "namespace": "default", + }) + + result, err := k8sTool.handleCreateResourceFromURL(ctx, req) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.False(t, result.IsError) + + callLog := mock.GetCallLog() + require.Len(t, callLog, 1) + assert.Contains(t, callLog[0].Args, "--token") + assert.Contains(t, callLog[0].Args, "url-token") + }) + + t.Run("apply manifest with bearer token", func(t *testing.T) { + mock := cmd.NewMockShellExecutor() + manifest := `apiVersion: v1 +kind: Pod +metadata: + name: test-pod` + expectedOutput := `pod/test-pod created` + // Use partial matcher since temp file name is dynamic + mock.AddPartialMatcherString("kubectl", []string{"apply", "-f"}, expectedOutput, nil) + ctx := cmd.WithShellExecutor(ctx, mock) + + k8sTool := newTestK8sToolWithPassthrough(true) + req := requestWithBearerToken("apply-token", map[string]interface{}{ + "manifest": manifest, + }) + + result, err := k8sTool.handleApplyManifest(ctx, req) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.False(t, result.IsError) + + callLog := mock.GetCallLog() + require.Len(t, callLog, 1) + assert.Contains(t, callLog[0].Args, "--token") + assert.Contains(t, callLog[0].Args, "apply-token") + }) + + t.Run("returns error when passthrough true and authorization header missing", func(t *testing.T) { + k8sTool := newTestK8sToolWithPassthrough(true) + req := mcp.CallToolRequest{} + req.Params.Arguments = map[string]interface{}{"resource_type": "pods"} + result, err := k8sTool.handleKubectlGetEnhanced(ctx, req) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.True(t, result.IsError) + assert.Contains(t, getResultText(result), "Bearer token required") + }) + + t.Run("no token when passthrough false and authorization header missing", func(t *testing.T) { + mock := cmd.NewMockShellExecutor() + expectedOutput := `NAME READY STATUS RESTARTS AGE` + // No --token in expected args when passthrough is false + mock.AddCommandString("kubectl", []string{"get", "pods", "-o", "wide"}, expectedOutput, nil) + ctx := cmd.WithShellExecutor(ctx, mock) + + k8sTool := newTestK8sToolWithPassthrough(false) + req := mcp.CallToolRequest{} + req.Params.Arguments = map[string]interface{}{"resource_type": "pods"} + // No Header set on request + result, err := k8sTool.handleKubectlGetEnhanced(ctx, req) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.False(t, result.IsError) + + // Verify no --token was added + callLog := mock.GetCallLog() + require.Len(t, callLog, 1) + assert.NotContains(t, callLog[0].Args, "--token") + }) + + t.Run("no token when passthrough false and authorization header is not bearer", func(t *testing.T) { + mock := cmd.NewMockShellExecutor() + expectedOutput := `NAME READY STATUS RESTARTS AGE` + // No --token when passthrough is false + mock.AddCommandString("kubectl", []string{"get", "pods", "-o", "wide"}, expectedOutput, nil) + ctx := cmd.WithShellExecutor(ctx, mock) + + k8sTool := newTestK8sToolWithPassthrough(false) + req := mcp.CallToolRequest{} + req.Header = http.Header{} + req.Header.Set("Authorization", "Basic dXNlcjpwYXNz") + req.Params.Arguments = 
map[string]interface{}{"resource_type": "pods"} + result, err := k8sTool.handleKubectlGetEnhanced(ctx, req) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.False(t, result.IsError) + + // Verify no --token was added + callLog := mock.GetCallLog() + require.Len(t, callLog, 1) + assert.NotContains(t, callLog[0].Args, "--token") + }) +}