Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions AGENTS.md
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ FastMCP is a comprehensive Python framework (Python ≥3.10) for building Model
```bash
uv sync # Install dependencies
uv run prek run --all-files # Ruff + Prettier + ty
uv run pytest -n auto # Run full test suite
uv run pytest # Run full test suite
```

**All three must pass** - this is enforced by CI. Alternative: `just build && just typecheck && just test`
Expand Down Expand Up @@ -242,9 +242,9 @@ uv sync # Installs all deps including dev tools

### Testing

- **Standard**: `uv run pytest -n auto`
- **Integration**: `uv run pytest -n auto -m "integration"`
- **Excluding markers**: `uv run pytest -n auto -m "not integration and not client_process"`
- **Standard**: `uv run pytest`
- **Integration**: `uv run pytest -m "integration"`
- **Excluding markers**: `uv run pytest -m "not integration and not client_process"`

### CLI Usage

Expand Down
42 changes: 2 additions & 40 deletions docs/servers/prompts.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -195,58 +195,20 @@ FastMCP intelligently handles different return types from your prompt function:
- **`str`**: Automatically converted to a single `PromptMessage`.
- **`PromptMessage`**: Used directly as provided. (Note a more user-friendly `Message` constructor is available that can accept raw strings instead of `TextContent` objects.)
- **`list[PromptMessage | str]`**: Used as a sequence of messages (a conversation).
- **`PromptResult`**: Full control over messages, description, and metadata. See [PromptResult](#promptresult) below.
- **`Any`**: If the return type is not one of the above, FastMCP attempts to convert the return value to a string and use it as a `PromptMessage`.

```python
from fastmcp.prompts.prompt import Message
from fastmcp.prompts.prompt import Message, PromptResult

@mcp.prompt
def roleplay_scenario(character: str, situation: str) -> list[Message]:
def roleplay_scenario(character: str, situation: str) -> PromptResult:
"""Sets up a roleplaying scenario with initial messages."""
return [
Message(f"Let's roleplay. You are {character}. The situation is: {situation}"),
Message("Okay, I understand. I am ready. What happens next?", role="assistant")
]
```

#### PromptResult

<VersionBadge version="2.14.1" />

For complete control over prompt responses, return a `PromptResult` object. This lets you include metadata alongside your prompt messages, which is useful for passing runtime information to clients.

```python
from fastmcp import FastMCP
from fastmcp.prompts import PromptResult, Message

mcp = FastMCP(name="PromptServer")

@mcp.prompt
def code_review(code: str) -> PromptResult:
"""Returns a code review prompt with metadata."""
return PromptResult(
messages=[
Message(f"Please review this code:\n\n```\n{code}\n```"),
],
description="Code review prompt",
meta={"review_type": "security", "priority": "high"}
)
```

`PromptResult` accepts three fields:

**`messages`** - A list of `PromptMessage` or `Message` objects representing the conversation to send to the LLM.

**`description`** - Optional description of the prompt result. If not provided, defaults to the prompt's docstring.

**`meta`** - Optional metadata dictionary that will be included in the MCP response's `_meta` field. Use this for runtime metadata like categorization, priority, or other client-specific data.

<Note>
The `meta` field in `PromptResult` is for runtime metadata specific to this render response. This is separate from the `meta` parameter in `@mcp.prompt(meta={...})`, which provides static metadata about the prompt definition itself (returned when listing prompts).
</Note>

You can still return plain `str`, `PromptMessage`, or lists from your prompt functions—`PromptResult` is opt-in for when you need to include metadata.

### Required vs. Optional Parameters

Expand Down
3 changes: 1 addition & 2 deletions src/fastmcp/prompts/__init__.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,9 @@
from .prompt import Message, Prompt, PromptResult, PromptMessage
from .prompt import Prompt, PromptMessage, Message
from .prompt_manager import PromptManager

__all__ = [
"Message",
"Prompt",
"PromptManager",
"PromptMessage",
"PromptResult",
]
97 changes: 9 additions & 88 deletions src/fastmcp/prompts/prompt.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,18 +4,15 @@

import inspect
import json
import warnings
from collections.abc import Awaitable, Callable, Sequence
from typing import Annotated, Any

import pydantic_core
from mcp import GetPromptResult
from mcp.types import ContentBlock, Icon, PromptMessage, Role, TextContent
from mcp.types import Prompt as MCPPrompt
from mcp.types import PromptArgument as MCPPromptArgument
from pydantic import Field, TypeAdapter

from fastmcp import settings
from fastmcp.exceptions import PromptError
from fastmcp.server.dependencies import get_context, without_injected_parameters
from fastmcp.server.tasks.config import TaskConfig
Expand Down Expand Up @@ -43,14 +40,13 @@ def Message(

message_validator = TypeAdapter[PromptMessage](PromptMessage)

# Type aliases for what prompt functions can return (before conversion to PromptResult)
_SyncPromptFnReturn = (
SyncPromptResult = (
str
| PromptMessage
| dict[str, Any]
| Sequence[str | PromptMessage | dict[str, Any]]
)
_PromptFnReturn = _SyncPromptFnReturn | Awaitable[_SyncPromptFnReturn]
PromptResult = SyncPromptResult | Awaitable[SyncPromptResult]


class PromptArgument(FastMCPBaseModel):
Expand All @@ -65,51 +61,6 @@ class PromptArgument(FastMCPBaseModel):
)


class PromptResult(FastMCPBaseModel):
"""Canonical result type for prompt rendering.

This is the internal type that all prompt renders return. It wraps the
messages with optional description and metadata.
"""

messages: list[PromptMessage] = Field(description="The prompt messages to return")
description: str | None = Field(
default=None, description="Optional description of the prompt result"
)
meta: dict[str, Any] | None = Field(
default=None, description="Optional metadata about the prompt result"
)

@classmethod
def from_value(
cls,
value: list[PromptMessage] | PromptResult,
description: str | None = None,
meta: dict[str, Any] | None = None,
) -> PromptResult:
"""Convert various types to PromptResult."""
if isinstance(value, PromptResult):
# Merge meta if provided
if meta and value.meta:
merged_meta = {**value.meta, **meta}
else:
merged_meta = meta or value.meta
return cls(
messages=value.messages,
description=description or value.description,
meta=merged_meta,
)
return cls(messages=value, description=description, meta=meta)

def to_mcp_prompt_result(self) -> GetPromptResult:
"""Convert to MCP GetPromptResult."""
return GetPromptResult(
description=self.description,
messages=self.messages,
_meta=self.meta,
)


class Prompt(FastMCPComponent):
"""A prompt template that can be rendered with parameters."""

Expand Down Expand Up @@ -162,7 +113,7 @@ def to_mcp_prompt(

@staticmethod
def from_function(
fn: Callable[..., _PromptFnReturn | Awaitable[_PromptFnReturn]],
fn: Callable[..., PromptResult | Awaitable[PromptResult]],
name: str | None = None,
title: str | None = None,
description: str | None = None,
Expand Down Expand Up @@ -195,45 +146,19 @@ def from_function(
async def render(
self,
arguments: dict[str, Any] | None = None,
) -> list[PromptMessage] | PromptResult:
) -> list[PromptMessage]:
"""Render the prompt with arguments.

This method is not implemented in the base Prompt class and must be
implemented by subclasses. The preferred return type is PromptResult,
but list[PromptMessage] is still supported for backwards compatibility.
implemented by subclasses.
"""
raise NotImplementedError("Subclasses must implement render()")

async def _render(
self,
arguments: dict[str, Any] | None = None,
) -> PromptResult:
"""Internal API that always returns PromptResult.

Calls render() and wraps list[PromptMessage] in PromptResult.
This is what PromptManager calls internally.
"""
result = await self.render(arguments)
if isinstance(result, PromptResult):
return result
# Deprecated in 2.14.1: returning list[PromptMessage] from render()
if settings.deprecation_warnings:
warnings.warn(
f"Prompt.render() returning list[PromptMessage] is deprecated (since 2.14.1). "
f"Return PromptResult instead. "
f"(Prompt: {self.__class__.__name__}, Name: {self.name})",
DeprecationWarning,
stacklevel=2,
)
return PromptResult.from_value(
result, description=self.description, meta=self.meta
)


class FunctionPrompt(Prompt):
"""A prompt that is a function."""

fn: Callable[..., _PromptFnReturn | Awaitable[_PromptFnReturn]]
fn: Callable[..., PromptResult | Awaitable[PromptResult]]
task_config: Annotated[
TaskConfig,
Field(description="Background task execution configuration (SEP-1686)."),
Expand All @@ -242,7 +167,7 @@ class FunctionPrompt(Prompt):
@classmethod
def from_function(
cls,
fn: Callable[..., _PromptFnReturn | Awaitable[_PromptFnReturn]],
fn: Callable[..., PromptResult | Awaitable[PromptResult]],
name: str | None = None,
title: str | None = None,
description: str | None = None,
Expand Down Expand Up @@ -397,7 +322,7 @@ def _convert_string_arguments(self, kwargs: dict[str, Any]) -> dict[str, Any]:
async def render(
self,
arguments: dict[str, Any] | None = None,
) -> PromptResult:
) -> list[PromptMessage]:
"""Render the prompt with arguments."""
# Validate required arguments
if self.arguments:
Expand Down Expand Up @@ -450,11 +375,7 @@ async def render(
"Could not convert prompt result to message."
) from e

return PromptResult(
messages=messages,
description=self.description,
meta=self.meta,
)
return messages
except Exception as e:
logger.exception(f"Error rendering prompt {self.name}")
raise PromptError(f"Error rendering prompt {self.name}.") from e
26 changes: 10 additions & 16 deletions src/fastmcp/prompts/prompt_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,14 +4,11 @@
from collections.abc import Awaitable, Callable
from typing import Any

from mcp import GetPromptResult

from fastmcp import settings
from fastmcp.exceptions import NotFoundError, PromptError
from fastmcp.prompts.prompt import (
FunctionPrompt,
Prompt,
PromptResult,
_PromptFnReturn,
)
from fastmcp.prompts.prompt import FunctionPrompt, Prompt, PromptResult
from fastmcp.settings import DuplicateBehavior
from fastmcp.utilities.logging import get_logger

Expand All @@ -27,11 +24,7 @@ def __init__(
mask_error_details: bool | None = None,
):
self._prompts: dict[str, Prompt] = {}
self.mask_error_details = (
settings.mask_error_details
if mask_error_details is None
else mask_error_details
)
self.mask_error_details = mask_error_details or settings.mask_error_details
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Bug: mask_error_details=False will be ignored.

Using the `or` operator means an explicit `False` value will be overridden by `settings.mask_error_details`. If a caller passes `mask_error_details=False`, the settings default will be used instead.

Apply this diff to preserve explicit False values:

-        self.mask_error_details = mask_error_details or settings.mask_error_details
+        self.mask_error_details = mask_error_details if mask_error_details is not None else settings.mask_error_details
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
self.mask_error_details = mask_error_details or settings.mask_error_details
self.mask_error_details = mask_error_details if mask_error_details is not None else settings.mask_error_details
🤖 Prompt for AI Agents
In src/fastmcp/prompts/prompt_manager.py around line 27, the assignment
self.mask_error_details = mask_error_details or settings.mask_error_details
ignores an explicit False passed by callers; change the assignment to preserve
explicit False by using a None check (e.g. self.mask_error_details =
mask_error_details if mask_error_details is not None else
settings.mask_error_details) so that only None falls back to settings.


# Default to "warn" if None is provided
if duplicate_behavior is None:
Expand Down Expand Up @@ -65,7 +58,7 @@ async def get_prompts(self) -> dict[str, Prompt]:

def add_prompt_from_fn(
self,
fn: Callable[..., _PromptFnReturn | Awaitable[_PromptFnReturn]],
fn: Callable[..., PromptResult | Awaitable[PromptResult]],
name: str | None = None,
description: str | None = None,
tags: set[str] | None = None,
Expand Down Expand Up @@ -105,17 +98,18 @@ async def render_prompt(
self,
name: str,
arguments: dict[str, Any] | None = None,
) -> PromptResult:
) -> GetPromptResult:
"""
Internal API for servers: Finds and renders a prompt, respecting the
filtered protocol path.
"""
prompt = await self.get_prompt(name)
try:
return await prompt._render(arguments)
except PromptError:
messages = await prompt.render(arguments)
return GetPromptResult(description=prompt.description, messages=messages)
except PromptError as e:
logger.exception(f"Error rendering prompt {name!r}")
raise
raise e
except Exception as e:
logger.exception(f"Error rendering prompt {name!r}")
if self.mask_error_details:
Expand Down
20 changes: 12 additions & 8 deletions src/fastmcp/server/middleware/caching.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
from pydantic import BaseModel, Field
from typing_extensions import NotRequired, Self, override

from fastmcp.prompts.prompt import Prompt, PromptResult
from fastmcp.prompts.prompt import Prompt
from fastmcp.resources.resource import Resource, ResourceContent
from fastmcp.server.middleware.middleware import CallNext, Middleware, MiddlewareContext
from fastmcp.tools.tool import Tool, ToolResult
Expand Down Expand Up @@ -220,10 +220,12 @@ def __init__(
default_collection="resources/read",
)

self._get_prompt_cache: PydanticAdapter[PromptResult] = PydanticAdapter(
key_value=self._stats,
pydantic_model=PromptResult,
default_collection="prompts/get",
self._get_prompt_cache: PydanticAdapter[mcp.types.GetPromptResult] = (
PydanticAdapter(
key_value=self._stats,
pydantic_model=mcp.types.GetPromptResult,
default_collection="prompts/get",
)
)

self._call_tool_cache: PydanticAdapter[CachableToolResult] = PydanticAdapter(
Expand Down Expand Up @@ -417,8 +419,10 @@ async def on_read_resource(
async def on_get_prompt(
self,
context: MiddlewareContext[mcp.types.GetPromptRequestParams],
call_next: CallNext[mcp.types.GetPromptRequestParams, PromptResult],
) -> PromptResult:
call_next: CallNext[
mcp.types.GetPromptRequestParams, mcp.types.GetPromptResult
],
) -> mcp.types.GetPromptResult:
"""Get a prompt from the cache, if caching is enabled, and the result is in the cache. Otherwise,
otherwise call the next middleware and store the result in the cache if caching is enabled."""
if self._get_prompt_settings.get("enabled") is False:
Expand All @@ -429,7 +433,7 @@ async def on_get_prompt(
if cached_value := await self._get_prompt_cache.get(key=cache_key):
return cached_value

value: PromptResult = await call_next(context=context)
value: mcp.types.GetPromptResult = await call_next(context=context)

await self._get_prompt_cache.put(
key=cache_key,
Expand Down
Loading
Loading