From 2d9b11bbb30fa40b269f95baa2eb6cb3ccfa0067 Mon Sep 17 00:00:00 2001
From: Marcelo Trylesinski
Date: Fri, 16 Jan 2026 15:12:58 +0100
Subject: [PATCH 1/3] refactor: use snake case instead of camel case in types

---
 .../simple-chatbot/mcp_simple_chatbot/main.py |   2 +-
 .../mcp_simple_task_client/main.py            |   4 +-
 .../simple-task-interactive-client/README.md  |   2 +-
 .../main.py                                   |   4 +-
 examples/fastmcp/icons_demo.py                |   8 +-
 examples/fastmcp/weather_structured.py        |  18 +--
 .../mcp_everything_server/server.py           |  18 +--
 .../mcp_simple_resource/server.py             |   2 +-
 .../snippets/clients/parsing_tool_results.py  |   8 +-
 examples/snippets/clients/stdio_client.py     |   4 +-
 .../clients/url_elicitation_client.py         |   2 +-
 src/mcp/client/experimental/tasks.py          |  10 +-
 src/mcp/client/session.py                     |  20 +--
 src/mcp/client/session_group.py               |   6 +-
 src/mcp/client/sse.py                         |   2 +-
 src/mcp/client/stdio/__init__.py              |   2 +-
 src/mcp/client/streamable_http.py             |   8 +-
 src/mcp/client/websocket.py                   |   2 +-
 src/mcp/server/elicitation.py                 |   2 +-
 .../server/experimental/request_context.py    |   2 +-
 .../server/experimental/session_features.py   |  26 ++--
 src/mcp/server/experimental/task_context.py   |  28 ++--
 .../experimental/task_result_handler.py       |   4 +-
 src/mcp/server/fastmcp/server.py              |  12 +-
 .../server/fastmcp/utilities/func_metadata.py |   2 +-
 src/mcp/server/lowlevel/experimental.py       |  18 +--
 src/mcp/server/lowlevel/server.py             |  28 ++--
 src/mcp/server/session.py                     |  66 ++++-----
 src/mcp/server/sse.py                         |   2 +-
 src/mcp/server/stdio.py                       |   2 +-
 src/mcp/server/streamable_http.py             |   2 +-
 src/mcp/server/validation.py                  |   2 +-
 src/mcp/server/websocket.py                   |   2 +-
 src/mcp/shared/exceptions.py                  |   2 +-
 .../shared/experimental/tasks/capabilities.py |   6 +-
 src/mcp/shared/experimental/tasks/context.py  |   2 +-
 src/mcp/shared/experimental/tasks/helpers.py  |   8 +-
 .../tasks/in_memory_task_store.py             |  12 +-
 src/mcp/shared/experimental/tasks/polling.py  |   2 +-
 src/mcp/shared/progress.py                    |   4 +-
 src/mcp/shared/session.py                     |  12 +-
 src/mcp/types.py                              | 127 +++++++++---------
 tests/client/test_http_unicode.py             |   2 +-
 tests/client/test_list_roots_callback.py      |   4 +-
 tests/client/test_logging_callback.py         |   6 +-
 tests/client/test_output_schema_validation.py |  22 +--
 tests/client/test_sampling_callback.py        |  14 +-
 tests/client/test_session.py                  |  70 +++++-----
 tests/client/test_session_group.py            |   6 +-
 .../tasks/client/test_capabilities.py         |  16 +--
 .../tasks/client/test_handlers.py             | 122 ++++++++---------
 .../tasks/client/test_poll_task.py            |  14 +-
 tests/experimental/tasks/client/test_tasks.py |  78 +++++------
 .../experimental/tasks/server/test_context.py |  24 ++--
 .../tasks/server/test_integration.py          |  60 ++++-----
 .../tasks/server/test_run_task_flow.py        |  32 ++---
 .../experimental/tasks/server/test_server.py  |  90 ++++++------
 .../tasks/server/test_server_task_context.py  |  62 ++++-----
 tests/experimental/tasks/server/test_store.py |  88 ++++++------
 .../tasks/server/test_task_result_handler.py  |  46 +++----
 tests/experimental/tasks/test_capabilities.py |  16 +--
 .../tasks/test_elicitation_scenarios.py       |  92 ++++++-------
 .../tasks/test_request_context.py             |  10 +-
 .../tasks/test_spec_compliance.py             |   6 +-
 .../test_1027_win_unreachable_cleanup.py      |   2 +-
 tests/issues/test_129_resource_templates.py   |   6 +-
 tests/issues/test_1338_icons_and_metadata.py  |  12 +-
 tests/issues/test_141_resource_templates.py   |   8 +-
 tests/issues/test_152_resource_mime_type.py   |  20 +--
 .../test_1574_resource_uri_validation.py      |   2 +-
 .../issues/test_1754_mime_type_parameters.py  |   8 +-
 tests/issues/test_176_progress_token.py       |   2 +-
 tests/issues/test_88_random_error.py          |   4 +-
 tests/server/fastmcp/prompts/test_base.py     |  10 +-
 tests/server/fastmcp/test_elicitation.py      |   2 +-
 tests/server/fastmcp/test_func_metadata.py    |   4 +-
 tests/server/fastmcp/test_integration.py      |  20 +--
 .../fastmcp/test_parameter_descriptions.py    |   2 +-
 tests/server/fastmcp/test_server.py           | 116 ++++++++--------
 tests/server/fastmcp/test_title.py            |  22 +--
 tests/server/fastmcp/test_tool_manager.py     |  16 +--
 tests/server/fastmcp/test_url_elicitation.py  |  16 +--
 .../test_url_elicitation_error_throw.py       |  14 +-
 tests/server/lowlevel/test_server_listing.py  |   4 +-
 tests/server/test_cancel_handling.py          |   4 +-
 .../server/test_lowlevel_input_validation.py  |  20 +--
 .../server/test_lowlevel_output_validation.py |  60 ++++-----
 .../server/test_lowlevel_tool_annotations.py  |   6 +-
 tests/server/test_read_resource.py            |   6 +-
 tests/server/test_session.py                  |  32 ++---
 tests/server/test_session_race_condition.py   |   8 +-
 tests/server/test_stateless_mode.py           |   4 +-
 tests/server/test_validation.py               |  12 +-
 tests/shared/test_exceptions.py               |  22 +--
 tests/shared/test_progress_notifications.py   |   8 +-
 tests/shared/test_session.py                  |   2 +-
 tests/shared/test_sse.py                      |  14 +-
 tests/shared/test_streamable_http.py          |  36 ++---
 tests/shared/test_ws.py                       |   6 +-
 tests/test_examples.py                        |   4 +-
 tests/test_types.py                           |  40 +++---
 101 files changed, 976 insertions(+), 973 deletions(-)

diff --git a/examples/clients/simple-chatbot/mcp_simple_chatbot/main.py b/examples/clients/simple-chatbot/mcp_simple_chatbot/main.py
index cec26aae0e..72b1a6f204 100644
--- a/examples/clients/simple-chatbot/mcp_simple_chatbot/main.py
+++ b/examples/clients/simple-chatbot/mcp_simple_chatbot/main.py
@@ -112,7 +112,7 @@ async def list_tools(self) -> list[Tool]:
 
         for item in tools_response:
             if item[0] == "tools":
-                tools.extend(Tool(tool.name, tool.description, tool.inputSchema, tool.title) for tool in item[1])
+                tools.extend(Tool(tool.name, tool.description, tool.input_schema, tool.title) for tool in item[1])
 
         return tools
diff --git a/examples/clients/simple-task-client/mcp_simple_task_client/main.py b/examples/clients/simple-task-client/mcp_simple_task_client/main.py
index 5050900208..1e653d58e4 100644
--- a/examples/clients/simple-task-client/mcp_simple_task_client/main.py
+++ b/examples/clients/simple-task-client/mcp_simple_task_client/main.py
@@ -25,13 +25,13 @@ async def run(url: str) -> None:
                 arguments={},
                 ttl=60000,
             )
-            task_id = result.task.taskId
+            task_id = result.task.task_id
             print(f"Task created: {task_id}")
 
             status = None
             # Poll until done (respects server's pollInterval hint)
             async for status in session.experimental.poll_task(task_id):
-                print(f"  Status: {status.status} - {status.statusMessage or ''}")
+                print(f"  Status: {status.status} - {status.status_message or ''}")
 
             # Check final status
             if status and status.status != "completed":
diff --git a/examples/clients/simple-task-interactive-client/README.md b/examples/clients/simple-task-interactive-client/README.md
index ac73d2bc12..3397d3b5d7 100644
--- a/examples/clients/simple-task-interactive-client/README.md
+++ b/examples/clients/simple-task-interactive-client/README.md
@@ -49,7 +49,7 @@ async def sampling_callback(context, params) -> CreateMessageResult:
 ```python
 # Call a tool as a task (returns immediately with task reference)
 result = await session.experimental.call_tool_as_task("tool_name", {"arg": "value"})
-task_id = result.task.taskId
+task_id = result.task.task_id
 
 # Get result - this delivers elicitation/sampling requests and blocks until complete
 final = await session.experimental.get_task_result(task_id, CallToolResult)
diff --git a/examples/clients/simple-task-interactive-client/mcp_simple_task_interactive_client/main.py b/examples/clients/simple-task-interactive-client/mcp_simple_task_interactive_client/main.py
index d99604ddf1..5f34eb9491 100644
--- a/examples/clients/simple-task-interactive-client/mcp_simple_task_interactive_client/main.py
+++ b/examples/clients/simple-task-interactive-client/mcp_simple_task_interactive_client/main.py
@@ -91,7 +91,7 @@ async def run(url: str) -> None:
             print("Calling confirm_delete tool...")
             elicit_task = await session.experimental.call_tool_as_task("confirm_delete", {"filename": "important.txt"})
-            elicit_task_id = elicit_task.task.taskId
+            elicit_task_id = elicit_task.task.task_id
             print(f"Task created: {elicit_task_id}")
 
             # Poll until terminal, calling tasks/result on input_required
@@ -112,7 +112,7 @@ async def run(url: str) -> None:
             print("Calling write_haiku tool...")
             sampling_task = await session.experimental.call_tool_as_task("write_haiku", {"topic": "autumn leaves"})
-            sampling_task_id = sampling_task.task.taskId
+            sampling_task_id = sampling_task.task.task_id
             print(f"Task created: {sampling_task_id}")
 
             # Poll until terminal, calling tasks/result on input_required
diff --git a/examples/fastmcp/icons_demo.py b/examples/fastmcp/icons_demo.py
index c6cf48acd8..6f6da6b9e1 100644
--- a/examples/fastmcp/icons_demo.py
+++ b/examples/fastmcp/icons_demo.py
@@ -14,7 +14,7 @@
 icon_data = base64.standard_b64encode(icon_path.read_bytes()).decode()
 icon_data_uri = f"data:image/png;base64,{icon_data}"
 
-icon_data = Icon(src=icon_data_uri, mimeType="image/png", sizes=["64x64"])
+icon_data = Icon(src=icon_data_uri, mime_type="image/png", sizes=["64x64"])
 
 # Create server with icons in implementation
 mcp = FastMCP("Icons Demo Server", website_url="https://github.com/modelcontextprotocol/python-sdk", icons=[icon_data])
@@ -40,9 +40,9 @@ def prompt_with_icon(text: str) -> str:
 
 @mcp.tool(
     icons=[
-        Icon(src=icon_data_uri, mimeType="image/png", sizes=["16x16"]),
-        Icon(src=icon_data_uri, mimeType="image/png", sizes=["32x32"]),
-        Icon(src=icon_data_uri, mimeType="image/png", sizes=["64x64"]),
+        Icon(src=icon_data_uri, mime_type="image/png", sizes=["16x16"]),
+        Icon(src=icon_data_uri, mime_type="image/png", sizes=["32x32"]),
+        Icon(src=icon_data_uri, mime_type="image/png", sizes=["64x64"]),
     ]
 )
 def multi_icon_tool(action: str) -> str:
diff --git a/examples/fastmcp/weather_structured.py b/examples/fastmcp/weather_structured.py
index 20cbf79578..87ad8993fc 100644
--- a/examples/fastmcp/weather_structured.py
+++ b/examples/fastmcp/weather_structured.py
@@ -161,32 +161,32 @@ async def test() -> None:
             # Test get_weather
             result = await client.call_tool("get_weather", {"city": "London"})
             print("\nWeather in London:")
-            print(json.dumps(result.structuredContent, indent=2))
+            print(json.dumps(result.structured_content, indent=2))
 
             # Test get_weather_summary
             result = await client.call_tool("get_weather_summary", {"city": "Paris"})
             print("\nWeather summary for Paris:")
-            print(json.dumps(result.structuredContent, indent=2))
+            print(json.dumps(result.structured_content, indent=2))
 
             # Test get_weather_metrics
             result = await client.call_tool("get_weather_metrics", {"cities": ["Tokyo", "Sydney", "Mumbai"]})
             print("\nWeather metrics:")
-            print(json.dumps(result.structuredContent, indent=2))
+            print(json.dumps(result.structured_content, indent=2))
 
             # Test get_weather_alerts
             result = await client.call_tool("get_weather_alerts", {"region": "California"})
             print("\nWeather alerts for California:")
-            print(json.dumps(result.structuredContent, indent=2))
+            print(json.dumps(result.structured_content, indent=2))
 
             # Test get_temperature
             result = await client.call_tool("get_temperature", {"city": "Berlin", "unit": "fahrenheit"})
             print("\nTemperature in Berlin:")
-            print(json.dumps(result.structuredContent, indent=2))
+            print(json.dumps(result.structured_content, indent=2))
 
             # Test get_weather_stats
             result = await client.call_tool("get_weather_stats", {"city": "Seattle", "days": 30})
             print("\nWeather stats for Seattle (30 days):")
-            print(json.dumps(result.structuredContent, indent=2))
+            print(json.dumps(result.structured_content, indent=2))
 
             # Also show the text content for comparison
             print("\nText content for last result:")
@@ -204,11 +204,11 @@ async def print_schemas() -> None:
         print(f"\nTool: {tool.name}")
         print(f"Description: {tool.description}")
         print("Input Schema:")
-        print(json.dumps(tool.inputSchema, indent=2))
+        print(json.dumps(tool.input_schema, indent=2))
 
-        if tool.outputSchema:
+        if tool.output_schema:
             print("Output Schema:")
-            print(json.dumps(tool.outputSchema, indent=2))
+            print(json.dumps(tool.output_schema, indent=2))
         else:
             print("Output Schema: None (returns unstructured content)")
diff --git a/examples/servers/everything-server/mcp_everything_server/server.py b/examples/servers/everything-server/mcp_everything_server/server.py
index 59c60ea654..bdc20cdcf2 100644
--- a/examples/servers/everything-server/mcp_everything_server/server.py
+++ b/examples/servers/everything-server/mcp_everything_server/server.py
@@ -98,13 +98,13 @@ def test_simple_text() -> str:
 @mcp.tool()
 def test_image_content() -> list[ImageContent]:
     """Tests image content response"""
-    return [ImageContent(type="image", data=TEST_IMAGE_BASE64, mimeType="image/png")]
+    return [ImageContent(type="image", data=TEST_IMAGE_BASE64, mime_type="image/png")]
 
 
 @mcp.tool()
 def test_audio_content() -> list[AudioContent]:
     """Tests audio content response"""
-    return [AudioContent(type="audio", data=TEST_AUDIO_BASE64, mimeType="audio/wav")]
+    return [AudioContent(type="audio", data=TEST_AUDIO_BASE64, mime_type="audio/wav")]
 
 
 @mcp.tool()
@@ -115,7 +115,7 @@ def test_embedded_resource() -> list[EmbeddedResource]:
             type="resource",
             resource=TextResourceContents(
                 uri="test://embedded-resource",
-                mimeType="text/plain",
+                mime_type="text/plain",
                 text="This is an embedded resource content.",
             ),
         )
@@ -127,12 +127,12 @@ def test_multiple_content_types() -> list[TextContent | ImageContent | EmbeddedResource]:
     """Tests response with multiple content types (text, image, resource)"""
     return [
         TextContent(type="text", text="Multiple content types test:"),
-        ImageContent(type="image", data=TEST_IMAGE_BASE64, mimeType="image/png"),
+        ImageContent(type="image", data=TEST_IMAGE_BASE64, mime_type="image/png"),
         EmbeddedResource(
             type="resource",
             resource=TextResourceContents(
                 uri="test://mixed-content-resource",
-                mimeType="application/json",
+                mime_type="application/json",
                 text='{"test": "data", "value": 123}',
             ),
         ),
@@ -164,7 +164,7 @@ async def test_tool_with_progress(ctx: Context[ServerSession, None]) -> str:
         await ctx.report_progress(progress=100, total=100, message="Completed step 100 of 100")
 
     # Return progress token as string
-    progress_token = ctx.request_context.meta.progressToken if ctx.request_context and ctx.request_context.meta else 0
+    progress_token = ctx.request_context.meta.progress_token if ctx.request_context and ctx.request_context.meta else 0
     return str(progress_token)
@@ -373,7 +373,7 @@ def test_prompt_with_embedded_resource(resourceUri: str) -> list[UserMessage]:
                 type="resource",
                 resource=TextResourceContents(
                     uri=resourceUri,
-                    mimeType="text/plain",
+                    mime_type="text/plain",
                     text="Embedded resource content for testing.",
                 ),
             ),
@@ -386,7 +386,7 @@ def test_prompt_with_embedded_resource(resourceUri: str) -> list[UserMessage]:
 def test_prompt_with_image() -> list[UserMessage]:
     """A prompt that includes image content"""
     return [
-        UserMessage(role="user", content=ImageContent(type="image", data=TEST_IMAGE_BASE64, mimeType="image/png")),
+        UserMessage(role="user", content=ImageContent(type="image", data=TEST_IMAGE_BASE64, mime_type="image/png")),
         UserMessage(role="user", content=TextContent(type="text", text="Please analyze the image above.")),
     ]
@@ -427,7 +427,7 @@ async def _handle_completion(
     """Handle completion requests"""
     # Basic completion support - returns empty array for conformance
     # Real implementations would provide contextual suggestions
-    return Completion(values=[], total=0, hasMore=False)
+    return Completion(values=[], total=0, has_more=False)
 
 
 # CLI
diff --git a/examples/servers/simple-resource/mcp_simple_resource/server.py b/examples/servers/simple-resource/mcp_simple_resource/server.py
index 26bc316399..f1ab4e4dcd 100644
--- a/examples/servers/simple-resource/mcp_simple_resource/server.py
+++ b/examples/servers/simple-resource/mcp_simple_resource/server.py
@@ -40,7 +40,7 @@ async def list_resources() -> list[types.Resource]:
             name=name,
             title=SAMPLE_RESOURCES[name]["title"],
             description=f"A sample text resource named {name}",
-            mimeType="text/plain",
+            mime_type="text/plain",
         )
         for name in SAMPLE_RESOURCES.keys()
     ]
diff --git a/examples/snippets/clients/parsing_tool_results.py b/examples/snippets/clients/parsing_tool_results.py
index 5158735461..b166406774 100644
--- a/examples/snippets/clients/parsing_tool_results.py
+++ b/examples/snippets/clients/parsing_tool_results.py
@@ -22,9 +22,9 @@ async def parse_tool_results():
 
             # Example 2: Parsing structured content from JSON tools
             result = await session.call_tool("get_user", {"id": "123"})
-            if hasattr(result, "structuredContent") and result.structuredContent:
+            if hasattr(result, "structured_content") and result.structured_content:
                 # Access structured data directly
-                user_data = result.structuredContent
+                user_data = result.structured_content
                 print(f"User: {user_data.get('name')}, Age: {user_data.get('age')}")
 
             # Example 3: Parsing embedded resources
@@ -41,11 +41,11 @@ async def parse_tool_results():
             result = await session.call_tool("generate_chart", {"data": [1, 2, 3]})
             for content in result.content:
                 if isinstance(content, types.ImageContent):
-                    print(f"Image ({content.mimeType}): {len(content.data)} bytes")
+                    print(f"Image ({content.mime_type}): {len(content.data)} bytes")
 
             # Example 5: Handling errors
             result = await session.call_tool("failing_tool", {})
-            if result.isError:
+            if result.is_error:
                 print("Tool execution failed!")
                 for content in result.content:
                     if isinstance(content, types.TextContent):
diff --git a/examples/snippets/clients/stdio_client.py b/examples/snippets/clients/stdio_client.py
index ac978035d4..08d7cfcdbc 100644
--- a/examples/snippets/clients/stdio_client.py
+++ b/examples/snippets/clients/stdio_client.py
@@ -32,7 +32,7 @@ async def handle_sampling_message(
             text="Hello, world! from model",
         ),
         model="gpt-3.5-turbo",
-        stopReason="endTurn",
+        stop_reason="endTurn",
     )
@@ -70,7 +70,7 @@ async def run():
             result_unstructured = result.content[0]
             if isinstance(result_unstructured, types.TextContent):
                 print(f"Tool result: {result_unstructured.text}")
-            result_structured = result.structuredContent
+            result_structured = result.structured_content
             print(f"Structured tool result: {result_structured}")
diff --git a/examples/snippets/clients/url_elicitation_client.py b/examples/snippets/clients/url_elicitation_client.py
index 56457512c6..300c38fa0c 100644
--- a/examples/snippets/clients/url_elicitation_client.py
+++ b/examples/snippets/clients/url_elicitation_client.py
@@ -154,7 +154,7 @@ async def call_tool_with_error_handling(
         result = await session.call_tool(tool_name, arguments)
 
         # Check if the tool returned an error in the result
-        if result.isError:
+        if result.is_error:
             print(f"Tool returned error: {result.content}")
             return None
diff --git a/src/mcp/client/experimental/tasks.py b/src/mcp/client/experimental/tasks.py
index ce9c387462..4eb8dcf5da 100644
--- a/src/mcp/client/experimental/tasks.py
+++ b/src/mcp/client/experimental/tasks.py
@@ -8,7 +8,7 @@
 Example:
     # Call a tool as a task
     result = await session.experimental.call_tool_as_task("tool_name", {"arg": "value"})
-    task_id = result.task.taskId
+    task_id = result.task.task_id
 
     # Get task status
     status = await session.experimental.get_task(task_id)
@@ -77,7 +77,7 @@ async def call_tool_as_task(
             result = await session.experimental.call_tool_as_task(
                 "long_running_tool", {"input": "data"}
             )
-            task_id = result.task.taskId
+            task_id = result.task.task_id
 
             # Poll for completion
             while True:
@@ -120,7 +120,7 @@ async def get_task(self, task_id: str) -> types.GetTaskResult:
         return await self._session.send_request(
             types.ClientRequest(
                 types.GetTaskRequest(
-                    params=types.GetTaskRequestParams(taskId=task_id),
+                    params=types.GetTaskRequestParams(task_id=task_id),
                 )
             ),
             types.GetTaskResult,
@@ -148,7 +148,7 @@ async def get_task_result(
         return await self._session.send_request(
             types.ClientRequest(
                 types.GetTaskPayloadRequest(
-                    params=types.GetTaskPayloadRequestParams(taskId=task_id),
+                    params=types.GetTaskPayloadRequestParams(task_id=task_id),
                 )
             ),
             result_type,
@@ -188,7 +188,7 @@ async def cancel_task(self, task_id: str) -> types.CancelTaskResult:
         return await self._session.send_request(
             types.ClientRequest(
                 types.CancelTaskRequest(
-                    params=types.CancelTaskRequestParams(taskId=task_id),
+                    params=types.CancelTaskRequestParams(task_id=task_id),
                 )
             ),
             types.CancelTaskResult,
diff --git a/src/mcp/client/session.py b/src/mcp/client/session.py
index 3d6a3979d0..637a3d1b15 100644
--- a/src/mcp/client/session.py
+++ b/src/mcp/client/session.py
@@ -161,7 +161,7 @@ async def initialize(self) -> types.InitializeResult:
             # TODO: Should this be based on whether we
             # _will_ send notifications, or only whether
             # they're supported?
-            types.RootsCapability(listChanged=True)
+            types.RootsCapability(list_changed=True)
             if self._list_roots_callback is not _default_list_roots_callback
             else None
         )
@@ -170,7 +170,7 @@ async def initialize(self) -> types.InitializeResult:
             types.ClientRequest(
                 types.InitializeRequest(
                     params=types.InitializeRequestParams(
-                        protocolVersion=types.LATEST_PROTOCOL_VERSION,
+                        protocol_version=types.LATEST_PROTOCOL_VERSION,
                         capabilities=types.ClientCapabilities(
                             sampling=sampling,
                             elicitation=elicitation,
@@ -178,15 +178,15 @@ async def initialize(self) -> types.InitializeResult:
                             roots=roots,
                             tasks=self._task_handlers.build_capability(),
                         ),
-                        clientInfo=self._client_info,
+                        client_info=self._client_info,
                     ),
                 )
             ),
             types.InitializeResult,
         )
 
-        if result.protocolVersion not in SUPPORTED_PROTOCOL_VERSIONS:
-            raise RuntimeError(f"Unsupported protocol version from the server: {result.protocolVersion}")
+        if result.protocol_version not in SUPPORTED_PROTOCOL_VERSIONS:
+            raise RuntimeError(f"Unsupported protocol version from the server: {result.protocol_version}")
 
         self._server_capabilities = result.capabilities
@@ -235,7 +235,7 @@ async def send_progress_notification(
             types.ClientNotification(
                 types.ProgressNotification(
                     params=types.ProgressNotificationParams(
-                        progressToken=progress_token,
+                        progress_token=progress_token,
                         progress=progress,
                         total=total,
                         message=message,
@@ -326,7 +326,7 @@ async def call_tool(
             progress_callback=progress_callback,
         )
 
-        if not result.isError:
+        if not result.is_error:
             await self._validate_tool_result(name, result)
 
         return result
@@ -346,12 +346,12 @@ async def _validate_tool_result(self, name: str, result: types.CallToolResult) -> None:
         if output_schema is not None:
             from jsonschema import SchemaError, ValidationError, validate
 
-            if result.structuredContent is None:
+            if result.structured_content is None:
                 raise RuntimeError(
                     f"Tool {name} has an output schema but did not return structured content"
                 )  # pragma: no cover
             try:
-                validate(result.structuredContent, output_schema)
+                validate(result.structured_content, output_schema)
             except ValidationError as e:
                 raise RuntimeError(f"Invalid structured content returned by tool {name}: {e}")  # pragma: no cover
             except SchemaError as e:  # pragma: no cover
@@ -418,7 +418,7 @@ async def list_tools(self, *, params: types.PaginatedRequestParams | None = None) -> types.ListToolsResult:
         # Cache tool output schemas for future validation
         # Note: don't clear the cache, as we may be using a cursor
         for tool in result.tools:
-            self._tool_output_schemas[tool.name] = tool.outputSchema
+            self._tool_output_schemas[tool.name] = tool.output_schema
 
         return result
diff --git a/src/mcp/client/session_group.py b/src/mcp/client/session_group.py
index 46dc3b560a..47137294d6 100644
--- a/src/mcp/client/session_group.py
+++ b/src/mcp/client/session_group.py
@@ -119,9 +119,9 @@ class _ComponentNames(BaseModel):
     _exit_stack: contextlib.AsyncExitStack
     _session_exit_stacks: dict[mcp.ClientSession, contextlib.AsyncExitStack]
 
-    # Optional fn consuming (component_name, serverInfo) for custom names.
+    # Optional fn consuming (component_name, server_info) for custom names.
     # This is provide a means to mitigate naming conflicts across servers.
-    # Example: (tool_name, serverInfo) => "{result.serverInfo.name}.{tool_name}"
+    # Example: (tool_name, server_info) => "{result.server_info.name}.{tool_name}"
     _ComponentNameHook: TypeAlias = Callable[[str, types.Implementation], str]
     _component_name_hook: _ComponentNameHook | None
@@ -324,7 +324,7 @@ async def _establish_session(
             # main _exit_stack.
             await self._exit_stack.enter_async_context(session_stack)
 
-            return result.serverInfo, session
+            return result.server_info, session
         except Exception:  # pragma: no cover
             # If anything during this setup fails, ensure the session-specific
             # stack is closed.
diff --git a/src/mcp/client/sse.py b/src/mcp/client/sse.py
index 4b0bbbc1e7..50c6a468d6 100644
--- a/src/mcp/client/sse.py
+++ b/src/mcp/client/sse.py
@@ -110,7 +110,7 @@ async def sse_reader(
                             continue
                         try:
                             message = types.JSONRPCMessage.model_validate_json(  # noqa: E501
-                                sse.data
+                                sse.data, by_name=False
                             )
                             logger.debug(f"Received server message: {message}")
                         except Exception as exc:  # pragma: no cover
diff --git a/src/mcp/client/stdio/__init__.py b/src/mcp/client/stdio/__init__.py
index 0d76bb958b..a0af4168b4 100644
--- a/src/mcp/client/stdio/__init__.py
+++ b/src/mcp/client/stdio/__init__.py
@@ -152,7 +152,7 @@ async def stdout_reader():
                     for line in lines:
                         try:
-                            message = types.JSONRPCMessage.model_validate_json(line)
+                            message = types.JSONRPCMessage.model_validate_json(line, by_name=False)
                         except Exception as exc:  # pragma: no cover
                             logger.exception("Failed to parse JSONRPC message from server")
                             await read_stream_writer.send(exc)
diff --git a/src/mcp/client/streamable_http.py b/src/mcp/client/streamable_http.py
index 06acf19313..7e53681010 100644
--- a/src/mcp/client/streamable_http.py
+++ b/src/mcp/client/streamable_http.py
@@ -113,8 +113,8 @@ def _maybe_extract_protocol_version_from_message(self, message: JSONRPCMessage)
         if isinstance(message.root, JSONRPCResponse) and message.root.result:  # pragma: no branch
             try:
                 # Parse the result as InitializeResult for type safety
-                init_result = InitializeResult.model_validate(message.root.result)
-                self.protocol_version = str(init_result.protocolVersion)
+                init_result = InitializeResult.model_validate(message.root.result, by_name=False)
+                self.protocol_version = str(init_result.protocol_version)
                 logger.info(f"Negotiated protocol version: {self.protocol_version}")
             except Exception:  # pragma: no cover
                 logger.warning("Failed to parse initialization response as InitializeResult", exc_info=True)
@@ -137,7 +137,7 @@ async def _handle_sse_event(
                     await resumption_callback(sse.id)
                 return False
             try:
-                message = JSONRPCMessage.model_validate_json(sse.data)
+                message = JSONRPCMessage.model_validate_json(sse.data, by_name=False)
                 logger.debug(f"SSE message: {message}")
 
                 # Extract protocol version from initialization response
@@ -291,7 +291,7 @@ async def _handle_json_response(
         """Handle JSON response from the server."""
         try:
             content = await response.aread()
-            message = JSONRPCMessage.model_validate_json(content)
+            message = JSONRPCMessage.model_validate_json(content, by_name=False)
 
             # Extract protocol version from initialization response
             if is_initialization:
diff --git a/src/mcp/client/websocket.py b/src/mcp/client/websocket.py
index 6aa6eb5e95..4596410e72 100644
--- a/src/mcp/client/websocket.py
+++ b/src/mcp/client/websocket.py
@@ -53,7 +53,7 @@ async def ws_reader():
             async with read_stream_writer:
                 async for raw_text in ws:
                     try:
-                        message = types.JSONRPCMessage.model_validate_json(raw_text)
+                        message = types.JSONRPCMessage.model_validate_json(raw_text, by_name=False)
                         session_message = SessionMessage(message)
                         await read_stream_writer.send(session_message)
                     except ValidationError as exc:  # pragma: no cover
diff --git a/src/mcp/server/elicitation.py b/src/mcp/server/elicitation.py
index 49195415bf..58e9fe4485 100644
--- a/src/mcp/server/elicitation.py
+++ b/src/mcp/server/elicitation.py
@@ -125,7 +125,7 @@ async def elicit_with_validation(
     result = await session.elicit_form(
         message=message,
-        requestedSchema=json_schema,
+        requested_schema=json_schema,
         related_request_id=related_request_id,
     )
diff --git a/src/mcp/server/experimental/request_context.py b/src/mcp/server/experimental/request_context.py
index 78e75beb6a..7e80d792f7 100644
--- a/src/mcp/server/experimental/request_context.py
+++ b/src/mcp/server/experimental/request_context.py
@@ -122,7 +122,7 @@ def validate_for_tool(
         Returns:
             None if valid, ErrorData if invalid and raise_error=False
         """
-        mode = tool.execution.taskSupport if tool.execution else None
+        mode = tool.execution.task_support if tool.execution else None
         return self.validate_task_mode(mode, raise_error=raise_error)
 
     def can_use_tool(self, tool_task_mode: TaskExecutionMode | None) -> bool:
diff --git a/src/mcp/server/experimental/session_features.py b/src/mcp/server/experimental/session_features.py
index 4842da5175..57efab75b2 100644
--- a/src/mcp/server/experimental/session_features.py
+++ b/src/mcp/server/experimental/session_features.py
@@ -52,7 +52,7 @@ async def get_task(self, task_id: str) -> types.GetTaskResult:
             GetTaskResult containing the task status
         """
         return await self._session.send_request(
-            types.ServerRequest(types.GetTaskRequest(params=types.GetTaskRequestParams(taskId=task_id))),
+            types.ServerRequest(types.GetTaskRequest(params=types.GetTaskRequestParams(task_id=task_id))),
             types.GetTaskResult,
         )
@@ -72,7 +72,7 @@ async def get_task_result(
             The task result, validated against result_type
         """
         return await self._session.send_request(
-            types.ServerRequest(types.GetTaskPayloadRequest(params=types.GetTaskPayloadRequestParams(taskId=task_id))),
+            types.ServerRequest(types.GetTaskPayloadRequest(params=types.GetTaskPayloadRequestParams(task_id=task_id))),
             result_type,
         )
@@ -97,7 +97,7 @@ async def poll_task(self, task_id: str) -> AsyncIterator[types.GetTaskResult]:
     async def elicit_as_task(
         self,
         message: str,
-        requestedSchema: types.ElicitRequestedSchema,
+        requested_schema: types.ElicitRequestedSchema,
         *,
         ttl: int = 60000,
     ) -> types.ElicitResult:
@@ -113,7 +113,7 @@ async def elicit_as_task(
         Args:
             message: The message to present to the user
-            requestedSchema: Schema defining the expected response
+            requested_schema: Schema defining the expected response
             ttl: Task time-to-live in milliseconds
 
         Returns:
@@ -130,7 +130,7 @@ async def elicit_as_task(
                 types.ElicitRequest(
                     params=types.ElicitRequestFormParams(
                         message=message,
-                        requestedSchema=requestedSchema,
+                        requested_schema=requested_schema,
                         task=types.TaskMetadata(ttl=ttl),
                     )
                 )
@@ -138,7 +138,7 @@ async def elicit_as_task(
             types.CreateTaskResult,
         )
 
-        task_id = create_result.task.taskId
+        task_id = create_result.task.task_id
 
         async for _ in self.poll_task(task_id):
             pass
@@ -196,15 +196,15 @@ async def create_message_as_task(
                 types.CreateMessageRequest(
                     params=types.CreateMessageRequestParams(
                         messages=messages,
-                        maxTokens=max_tokens,
-                        systemPrompt=system_prompt,
-                        includeContext=include_context,
+                        max_tokens=max_tokens,
+                        system_prompt=system_prompt,
+                        include_context=include_context,
                         temperature=temperature,
-                        stopSequences=stop_sequences,
+                        stop_sequences=stop_sequences,
                         metadata=metadata,
-                        modelPreferences=model_preferences,
+                        model_preferences=model_preferences,
                         tools=tools,
-                        toolChoice=tool_choice,
+                        tool_choice=tool_choice,
                         task=types.TaskMetadata(ttl=ttl),
                     )
                 )
@@ -212,7 +212,7 @@ async def create_message_as_task(
             types.CreateTaskResult,
         )
 
-        task_id = create_result.task.taskId
+        task_id = create_result.task.task_id
 
         async for _ in self.poll_task(task_id):
             pass
diff --git a/src/mcp/server/experimental/task_context.py b/src/mcp/server/experimental/task_context.py
index e6e14fc938..4eadab2164 100644
--- a/src/mcp/server/experimental/task_context.py
+++ b/src/mcp/server/experimental/task_context.py
@@ -65,7 +65,7 @@ async def my_task_work(task: ServerTaskContext) -> CallToolResult:
         result = await task.elicit(
             message="Continue?",
-            requestedSchema={"type": "object", "properties": {"ok": {"type": "boolean"}}}
+            requested_schema={"type": "object", "properties": {"ok": {"type": "boolean"}}}
         )
 
         if result.content.get("ok"):
@@ -165,13 +165,13 @@ async def _send_notification(self) -> None:
                 ServerNotification(
                     TaskStatusNotification(
                         params=TaskStatusNotificationParams(
-                            taskId=task.taskId,
+                            task_id=task.task_id,
                             status=task.status,
-                            statusMessage=task.statusMessage,
-                            createdAt=task.createdAt,
-                            lastUpdatedAt=task.lastUpdatedAt,
+                            status_message=task.status_message,
+                            created_at=task.created_at,
+                            last_updated_at=task.last_updated_at,
                             ttl=task.ttl,
-                            pollInterval=task.pollInterval,
+                            poll_interval=task.poll_interval,
                         )
                     )
                 )
@@ -202,7 +202,7 @@ def _check_sampling_capability(self) -> None:
     async def elicit(
         self,
         message: str,
-        requestedSchema: ElicitRequestedSchema,
+        requested_schema: ElicitRequestedSchema,
     ) -> ElicitResult:
         """
         Send an elicitation request via the task message queue.
@@ -217,7 +217,7 @@ async def elicit(
         Args:
             message: The message to present to the user
-            requestedSchema: Schema defining the expected response structure
+            requested_schema: Schema defining the expected response structure
 
         Returns:
             The client's response
@@ -236,7 +236,7 @@ async def elicit(
         # Build the request using session's helper
         request = self._session._build_elicit_form_request(  # pyright: ignore[reportPrivateUsage]
             message=message,
-            requestedSchema=requestedSchema,
+            requested_schema=requested_schema,
             related_task_id=self.task_id,
         )
         request_id: RequestId = request.id
@@ -430,7 +430,7 @@ async def create_message(
     async def elicit_as_task(
         self,
         message: str,
-        requestedSchema: ElicitRequestedSchema,
+        requested_schema: ElicitRequestedSchema,
         *,
         ttl: int = 60000,
     ) -> ElicitResult:
@@ -444,7 +444,7 @@ async def elicit_as_task(
         Args:
             message: The message to present to the user
-            requestedSchema: Schema defining the expected response structure
+            requested_schema: Schema defining the expected response structure
             ttl: Task time-to-live in milliseconds for the client's task
 
         Returns:
@@ -465,7 +465,7 @@ async def elicit_as_task(
         request = self._session._build_elicit_form_request(  # pyright: ignore[reportPrivateUsage]
             message=message,
-            requestedSchema=requestedSchema,
+            requested_schema=requested_schema,
             related_task_id=self.task_id,
             task=TaskMetadata(ttl=ttl),
         )
@@ -486,7 +486,7 @@ async def elicit_as_task(
         # Wait for initial response (CreateTaskResult from client)
         response_data = await resolver.wait()
         create_result = CreateTaskResult.model_validate(response_data)
-        client_task_id = create_result.task.taskId
+        client_task_id = create_result.task.task_id
 
         # Poll the client's task using session.experimental
         async for _ in self._session.experimental.poll_task(client_task_id):
@@ -592,7 +592,7 @@ async def create_message_as_task(
         # Wait for initial response (CreateTaskResult from client)
         response_data = await resolver.wait()
         create_result = CreateTaskResult.model_validate(response_data)
-        client_task_id = create_result.task.taskId
+        client_task_id = create_result.task.task_id
 
         # Poll the client's task using session.experimental
         async for _ in self._session.experimental.poll_task(client_task_id):
diff --git a/src/mcp/server/experimental/task_result_handler.py b/src/mcp/server/experimental/task_result_handler.py
index 0b869216e8..85b1259a77 100644
--- a/src/mcp/server/experimental/task_result_handler.py
+++ b/src/mcp/server/experimental/task_result_handler.py
@@ -106,7 +106,7 @@ async def handle(
         Returns:
             GetTaskPayloadResult with the task's final payload
         """
-        task_id = request.params.taskId
+        task_id = request.params.task_id
 
         while True:
             task = await self._store.get_task(task_id)
@@ -126,7 +126,7 @@ async def handle(
         # GetTaskPayloadResult is a Result with extra="allow"
         # The stored result contains the actual payload data
         # Per spec: tasks/result MUST include _meta with related-task metadata
-        related_task = RelatedTaskMetadata(taskId=task_id)
+        related_task = RelatedTaskMetadata(task_id=task_id)
         related_task_meta: dict[str, Any] = {RELATED_TASK_METADATA_KEY: related_task.model_dump(by_alias=True)}
         if result is not None:
             result_data = result.model_dump(by_alias=True)
diff --git a/src/mcp/server/fastmcp/server.py b/src/mcp/server/fastmcp/server.py
index 0d18df1131..d872e7d7bf 100644
--- a/src/mcp/server/fastmcp/server.py
+++ b/src/mcp/server/fastmcp/server.py
@@ -305,8 +305,8 @@ async def list_tools(self) -> list[MCPTool]:
                 name=info.name,
                 title=info.title,
                 description=info.description,
-                inputSchema=info.parameters,
-                outputSchema=info.output_schema,
+                input_schema=info.parameters,
+                output_schema=info.output_schema,
                 annotations=info.annotations,
                 icons=info.icons,
                 _meta=info.meta,
@@ -340,7 +340,7 @@ async def list_resources(self) -> list[MCPResource]:
                 name=resource.name or "",
                 title=resource.title,
                 description=resource.description,
-                mimeType=resource.mime_type,
+                mime_type=resource.mime_type,
                 icons=resource.icons,
                 annotations=resource.annotations,
                 _meta=resource.meta,
@@ -352,11 +352,11 @@ async def list_resource_templates(self) -> list[MCPResourceTemplate]:
         templates = self._resource_manager.list_templates()
         return [
             MCPResourceTemplate(
-                uriTemplate=template.uri_template,
+                uri_template=template.uri_template,
                 name=template.name,
                 title=template.title,
                 description=template.description,
-                mimeType=template.mime_type,
+                mime_type=template.mime_type,
                 icons=template.icons,
                 annotations=template.annotations,
                 _meta=template.meta,
@@ -1104,7 +1104,7 @@ async def report_progress(self, progress: float, total: float | None = None, message: str | None = None) -> None:
             total: Optional total value e.g. 100
             message: Optional message e.g. Starting render...
""" - progress_token = self.request_context.meta.progressToken if self.request_context.meta else None + progress_token = self.request_context.meta.progress_token if self.request_context.meta else None if progress_token is None: # pragma: no cover return diff --git a/src/mcp/server/fastmcp/utilities/func_metadata.py b/src/mcp/server/fastmcp/utilities/func_metadata.py index fa443d2fcb..65b31e2b53 100644 --- a/src/mcp/server/fastmcp/utilities/func_metadata.py +++ b/src/mcp/server/fastmcp/utilities/func_metadata.py @@ -113,7 +113,7 @@ def convert_result(self, result: Any) -> Any: if isinstance(result, CallToolResult): if self.output_schema is not None: assert self.output_model is not None, "Output model must be set if output schema is defined" - self.output_model.model_validate(result.structuredContent) + self.output_model.model_validate(result.structured_content) return result unstructured_content = _convert_to_content(result) diff --git a/src/mcp/server/lowlevel/experimental.py b/src/mcp/server/lowlevel/experimental.py index 42353e4ea0..1ff01b01d7 100644 --- a/src/mcp/server/lowlevel/experimental.py +++ b/src/mcp/server/lowlevel/experimental.py @@ -134,23 +134,23 @@ def _register_default_task_handlers(self) -> None: if GetTaskRequest not in self._request_handlers: async def _default_get_task(req: GetTaskRequest) -> ServerResult: - task = await support.store.get_task(req.params.taskId) + task = await support.store.get_task(req.params.task_id) if task is None: raise McpError( ErrorData( code=INVALID_PARAMS, - message=f"Task not found: {req.params.taskId}", + message=f"Task not found: {req.params.task_id}", ) ) return ServerResult( GetTaskResult( - taskId=task.taskId, + task_id=task.task_id, status=task.status, - statusMessage=task.statusMessage, - createdAt=task.createdAt, - lastUpdatedAt=task.lastUpdatedAt, + status_message=task.status_message, + created_at=task.created_at, + last_updated_at=task.last_updated_at, ttl=task.ttl, - pollInterval=task.pollInterval, + poll_interval=task.poll_interval, ) ) @@ -172,7 +172,7 @@ async def _default_get_task_result(req: GetTaskPayloadRequest) -> ServerResult: async def _default_list_tasks(req: ListTasksRequest) -> ServerResult: cursor = req.params.cursor if req.params else None tasks, next_cursor = await support.store.list_tasks(cursor) - return ServerResult(ListTasksResult(tasks=tasks, nextCursor=next_cursor)) + return ServerResult(ListTasksResult(tasks=tasks, next_cursor=next_cursor)) self._request_handlers[ListTasksRequest] = _default_list_tasks @@ -180,7 +180,7 @@ async def _default_list_tasks(req: ListTasksRequest) -> ServerResult: if CancelTaskRequest not in self._request_handlers: async def _default_cancel_task(req: CancelTaskRequest) -> ServerResult: - result = await cancel_task(support.store, req.params.taskId) + result = await cancel_task(support.store, req.params.task_id) return ServerResult(result) self._request_handlers[CancelTaskRequest] = _default_cancel_task diff --git a/src/mcp/server/lowlevel/server.py b/src/mcp/server/lowlevel/server.py index 491ff7d0b3..afe671f0ec 100644 --- a/src/mcp/server/lowlevel/server.py +++ b/src/mcp/server/lowlevel/server.py @@ -209,17 +209,17 @@ def get_capabilities( # Set prompt capabilities if handler exists if types.ListPromptsRequest in self.request_handlers: - prompts_capability = types.PromptsCapability(listChanged=notification_options.prompts_changed) + prompts_capability = types.PromptsCapability(list_changed=notification_options.prompts_changed) # Set resource capabilities if handler exists if 
types.ListResourcesRequest in self.request_handlers: resources_capability = types.ResourcesCapability( - subscribe=False, listChanged=notification_options.resources_changed + subscribe=False, list_changed=notification_options.resources_changed ) # Set tool capabilities if handler exists if types.ListToolsRequest in self.request_handlers: - tools_capability = types.ToolsCapability(listChanged=notification_options.tools_changed) + tools_capability = types.ToolsCapability(list_changed=notification_options.tools_changed) # Set logging capabilities if handler exists if types.SetLevelRequest in self.request_handlers: # pragma: no cover @@ -327,7 +327,7 @@ def decorator(func: Callable[[], Awaitable[list[types.ResourceTemplate]]]): async def handler(_: Any): templates = await func() - return types.ServerResult(types.ListResourceTemplatesResult(resourceTemplates=templates)) + return types.ServerResult(types.ListResourceTemplatesResult(resource_templates=templates)) self.request_handlers[types.ListResourceTemplatesRequest] = handler return func @@ -351,14 +351,14 @@ def create_content(data: str | bytes, mime_type: str | None, meta: dict[str, Any return types.TextResourceContents( uri=req.params.uri, text=data, - mimeType=mime_type or "text/plain", + mime_type=mime_type or "text/plain", **meta_kwargs, ) case bytes() as data: # pragma: no cover return types.BlobResourceContents( uri=req.params.uri, blob=base64.b64encode(data).decode(), - mimeType=mime_type or "application/octet-stream", + mime_type=mime_type or "application/octet-stream", **meta_kwargs, ) @@ -474,7 +474,7 @@ def _make_error_result(self, error_message: str) -> types.ServerResult: return types.ServerResult( types.CallToolResult( content=[types.TextContent(type="text", text=error_message)], - isError=True, + is_error=True, ) ) @@ -532,7 +532,7 @@ async def handler(req: types.CallToolRequest): # input validation if validate_input and tool: try: - jsonschema.validate(instance=arguments, schema=tool.inputSchema) + jsonschema.validate(instance=arguments, schema=tool.input_schema) except jsonschema.ValidationError as e: return self._make_error_result(f"Input validation error: {e.message}") @@ -562,14 +562,14 @@ async def handler(req: types.CallToolRequest): return self._make_error_result(f"Unexpected return type from tool: {type(results).__name__}") # output validation - if tool and tool.outputSchema is not None: + if tool and tool.output_schema is not None: if maybe_structured_content is None: return self._make_error_result( "Output validation error: outputSchema defined but no structured output returned" ) else: try: - jsonschema.validate(instance=maybe_structured_content, schema=tool.outputSchema) + jsonschema.validate(instance=maybe_structured_content, schema=tool.output_schema) except jsonschema.ValidationError as e: return self._make_error_result(f"Output validation error: {e.message}") @@ -577,8 +577,8 @@ async def handler(req: types.CallToolRequest): return types.ServerResult( types.CallToolResult( content=list(unstructured_content), - structuredContent=maybe_structured_content, - isError=False, + structured_content=maybe_structured_content, + is_error=False, ) ) except UrlElicitationRequiredError: @@ -601,7 +601,7 @@ def decorator( async def handler(req: types.ProgressNotification): await func( - req.params.progressToken, + req.params.progress_token, req.params.progress, req.params.total, req.params.message, @@ -633,7 +633,7 @@ async def handler(req: types.CompleteRequest): types.CompleteResult( completion=completion if completion is 
not None - else types.Completion(values=[], total=None, hasMore=None), + else types.Completion(values=[], total=None, has_more=None), ) ) diff --git a/src/mcp/server/session.py b/src/mcp/server/session.py index b6fd3a2e8f..eebec5bb47 100644 --- a/src/mcp/server/session.py +++ b/src/mcp/server/session.py @@ -129,7 +129,7 @@ def check_client_capability(self, capability: types.ClientCapabilities) -> bool: if capability.roots is not None: if client_caps.roots is None: return False - if capability.roots.listChanged and not client_caps.roots.listChanged: + if capability.roots.list_changed and not client_caps.roots.list_changed: return False if capability.sampling is not None: @@ -165,23 +165,23 @@ async def _receive_loop(self) -> None: async def _received_request(self, responder: RequestResponder[types.ClientRequest, types.ServerResult]): match responder.request.root: case types.InitializeRequest(params=params): - requested_version = params.protocolVersion + requested_version = params.protocol_version self._initialization_state = InitializationState.Initializing self._client_params = params with responder: await responder.respond( types.ServerResult( types.InitializeResult( - protocolVersion=requested_version + protocol_version=requested_version if requested_version in SUPPORTED_PROTOCOL_VERSIONS else types.LATEST_PROTOCOL_VERSION, capabilities=self._init_options.capabilities, - serverInfo=types.Implementation( + server_info=types.Implementation( name=self._init_options.server_name, title=self._init_options.title, description=self._init_options.description, version=self._init_options.server_version, - websiteUrl=self._init_options.website_url, + website_url=self._init_options.website_url, icons=self._init_options.icons, ), instructions=self._init_options.instructions, @@ -327,15 +327,15 @@ async def create_message( types.CreateMessageRequest( params=types.CreateMessageRequestParams( messages=messages, - systemPrompt=system_prompt, - includeContext=include_context, + system_prompt=system_prompt, + include_context=include_context, temperature=temperature, - maxTokens=max_tokens, - stopSequences=stop_sequences, + max_tokens=max_tokens, + stop_sequences=stop_sequences, metadata=metadata, - modelPreferences=model_preferences, + model_preferences=model_preferences, tools=tools, - toolChoice=tool_choice, + tool_choice=tool_choice, ), ) ) @@ -366,14 +366,14 @@ async def list_roots(self) -> types.ListRootsResult: async def elicit( self, message: str, - requestedSchema: types.ElicitRequestedSchema, + requested_schema: types.ElicitRequestedSchema, related_request_id: types.RequestId | None = None, ) -> types.ElicitResult: """Send a form mode elicitation/create request. Args: message: The message to present to the user - requestedSchema: Schema defining the expected response structure + requested_schema: Schema defining the expected response structure related_request_id: Optional ID of the request that triggered this elicitation Returns: @@ -383,19 +383,19 @@ async def elicit( This method is deprecated in favor of elicit_form(). It remains for backward compatibility but new code should use elicit_form(). 
""" - return await self.elicit_form(message, requestedSchema, related_request_id) + return await self.elicit_form(message, requested_schema, related_request_id) async def elicit_form( self, message: str, - requestedSchema: types.ElicitRequestedSchema, + requested_schema: types.ElicitRequestedSchema, related_request_id: types.RequestId | None = None, ) -> types.ElicitResult: """Send a form mode elicitation/create request. Args: message: The message to present to the user - requestedSchema: Schema defining the expected response structure + requested_schema: Schema defining the expected response structure related_request_id: Optional ID of the request that triggered this elicitation Returns: @@ -411,7 +411,7 @@ async def elicit_form( types.ElicitRequest( params=types.ElicitRequestFormParams( message=message, - requestedSchema=requestedSchema, + requested_schema=requested_schema, ), ) ), @@ -451,7 +451,7 @@ async def elicit_url( params=types.ElicitRequestURLParams( message=message, url=url, - elicitationId=elicitation_id, + elicitation_id=elicitation_id, ), ) ), @@ -479,7 +479,7 @@ async def send_progress_notification( types.ServerNotification( types.ProgressNotification( params=types.ProgressNotificationParams( - progressToken=progress_token, + progress_token=progress_token, progress=progress, total=total, message=message, @@ -519,7 +519,7 @@ async def send_elicit_complete( await self.send_notification( types.ServerNotification( types.ElicitCompleteNotification( - params=types.ElicitCompleteNotificationParams(elicitationId=elicitation_id) + params=types.ElicitCompleteNotificationParams(elicitation_id=elicitation_id) ) ), related_request_id, @@ -528,7 +528,7 @@ async def send_elicit_complete( def _build_elicit_form_request( self, message: str, - requestedSchema: types.ElicitRequestedSchema, + requested_schema: types.ElicitRequestedSchema, related_task_id: str | None = None, task: types.TaskMetadata | None = None, ) -> types.JSONRPCRequest: @@ -536,7 +536,7 @@ def _build_elicit_form_request( Args: message: The message to present to the user - requestedSchema: Schema defining the expected response structure + requested_schema: Schema defining the expected response structure related_task_id: If provided, adds io.modelcontextprotocol/related-task metadata task: If provided, makes this a task-augmented request @@ -545,7 +545,7 @@ def _build_elicit_form_request( """ params = types.ElicitRequestFormParams( message=message, - requestedSchema=requestedSchema, + requested_schema=requested_schema, task=task, ) params_data = params.model_dump(by_alias=True, mode="json", exclude_none=True) @@ -556,7 +556,7 @@ def _build_elicit_form_request( if "_meta" not in params_data: # pragma: no cover params_data["_meta"] = {} params_data["_meta"][RELATED_TASK_METADATA_KEY] = types.RelatedTaskMetadata( - taskId=related_task_id + task_id=related_task_id ).model_dump(by_alias=True) request_id = f"task-{related_task_id}-{id(params)}" if related_task_id else self._request_id @@ -591,7 +591,7 @@ def _build_elicit_url_request( params = types.ElicitRequestURLParams( message=message, url=url, - elicitationId=elicitation_id, + elicitation_id=elicitation_id, ) params_data = params.model_dump(by_alias=True, mode="json", exclude_none=True) @@ -601,7 +601,7 @@ def _build_elicit_url_request( if "_meta" not in params_data: # pragma: no cover params_data["_meta"] = {} params_data["_meta"][RELATED_TASK_METADATA_KEY] = types.RelatedTaskMetadata( - taskId=related_task_id + task_id=related_task_id ).model_dump(by_alias=True) request_id = 
f"task-{related_task_id}-{id(params)}" if related_task_id else self._request_id @@ -652,15 +652,15 @@ def _build_create_message_request( """ params = types.CreateMessageRequestParams( messages=messages, - systemPrompt=system_prompt, - includeContext=include_context, + system_prompt=system_prompt, + include_context=include_context, temperature=temperature, - maxTokens=max_tokens, - stopSequences=stop_sequences, + max_tokens=max_tokens, + stop_sequences=stop_sequences, metadata=metadata, - modelPreferences=model_preferences, + model_preferences=model_preferences, tools=tools, - toolChoice=tool_choice, + tool_choice=tool_choice, task=task, ) params_data = params.model_dump(by_alias=True, mode="json", exclude_none=True) @@ -671,7 +671,7 @@ def _build_create_message_request( if "_meta" not in params_data: # pragma: no cover params_data["_meta"] = {} params_data["_meta"][RELATED_TASK_METADATA_KEY] = types.RelatedTaskMetadata( - taskId=related_task_id + task_id=related_task_id ).model_dump(by_alias=True) request_id = f"task-{related_task_id}-{id(params)}" if related_task_id else self._request_id diff --git a/src/mcp/server/sse.py b/src/mcp/server/sse.py index 19af93fd16..6ea0b4292f 100644 --- a/src/mcp/server/sse.py +++ b/src/mcp/server/sse.py @@ -231,7 +231,7 @@ async def handle_post_message(self, scope: Scope, receive: Receive, send: Send) logger.debug(f"Received JSON: {body}") try: - message = types.JSONRPCMessage.model_validate_json(body) + message = types.JSONRPCMessage.model_validate_json(body, by_name=False) logger.debug(f"Validated client message: {message}") except ValidationError as err: logger.exception("Failed to parse message") diff --git a/src/mcp/server/stdio.py b/src/mcp/server/stdio.py index bcb9247abb..22fe116a7c 100644 --- a/src/mcp/server/stdio.py +++ b/src/mcp/server/stdio.py @@ -62,7 +62,7 @@ async def stdin_reader(): async with read_stream_writer: async for line in stdin: try: - message = types.JSONRPCMessage.model_validate_json(line) + message = types.JSONRPCMessage.model_validate_json(line, by_name=False) except Exception as exc: # pragma: no cover await read_stream_writer.send(exc) continue diff --git a/src/mcp/server/streamable_http.py b/src/mcp/server/streamable_http.py index 2613b530c4..8f488557b2 100644 --- a/src/mcp/server/streamable_http.py +++ b/src/mcp/server/streamable_http.py @@ -471,7 +471,7 @@ async def _handle_post_request(self, scope: Scope, request: Request, receive: Re return try: # pragma: no cover - message = JSONRPCMessage.model_validate(raw_message) + message = JSONRPCMessage.model_validate(raw_message, by_name=False) except ValidationError as e: # pragma: no cover response = self._create_error_response( f"Validation error: {str(e)}", diff --git a/src/mcp/server/validation.py b/src/mcp/server/validation.py index 2ccd7056bd..003a9d1236 100644 --- a/src/mcp/server/validation.py +++ b/src/mcp/server/validation.py @@ -99,6 +99,6 @@ def validate_tool_use_result_messages(messages: list[SamplingMessage]) -> None: if has_previous_tool_use and previous_content: tool_use_ids = {c.id for c in previous_content if c.type == "tool_use"} - tool_result_ids = {c.toolUseId for c in last_content if c.type == "tool_result"} + tool_result_ids = {c.tool_use_id for c in last_content if c.type == "tool_result"} if tool_use_ids != tool_result_ids: raise ValueError("ids of tool_result blocks and tool_use blocks from previous message do not match") diff --git a/src/mcp/server/websocket.py b/src/mcp/server/websocket.py index 5d5efd16e9..3aacc467d1 100644 --- 
a/src/mcp/server/websocket.py +++ b/src/mcp/server/websocket.py @@ -37,7 +37,7 @@ async def ws_reader(): async with read_stream_writer: async for msg in websocket.iter_text(): try: - client_message = types.JSONRPCMessage.model_validate_json(msg) + client_message = types.JSONRPCMessage.model_validate_json(msg, by_name=False) except ValidationError as exc: await read_stream_writer.send(exc) continue diff --git a/src/mcp/shared/exceptions.py b/src/mcp/shared/exceptions.py index 80f202443f..609e5f3f32 100644 --- a/src/mcp/shared/exceptions.py +++ b/src/mcp/shared/exceptions.py @@ -49,7 +49,7 @@ class UrlElicitationRequiredError(McpError): mode="url", message="Authorization required for your files", url="https://example.com/oauth/authorize", - elicitationId="auth-001" + elicitation_id="auth-001" ) ]) """ diff --git a/src/mcp/shared/experimental/tasks/capabilities.py b/src/mcp/shared/experimental/tasks/capabilities.py index 307fcdd6e5..37c6597ec2 100644 --- a/src/mcp/shared/experimental/tasks/capabilities.py +++ b/src/mcp/shared/experimental/tasks/capabilities.py @@ -48,8 +48,8 @@ def check_tasks_capability( if required.requests.sampling is not None: if client.requests.sampling is None: return False - if required.requests.sampling.createMessage is not None: - if client.requests.sampling.createMessage is None: + if required.requests.sampling.create_message is not None: + if client.requests.sampling.create_message is None: return False return True @@ -74,7 +74,7 @@ def has_task_augmented_sampling(caps: ClientCapabilities) -> bool: return False if caps.tasks.requests.sampling is None: return False - return caps.tasks.requests.sampling.createMessage is not None + return caps.tasks.requests.sampling.create_message is not None def require_task_augmented_elicitation(client_caps: ClientCapabilities | None) -> None: diff --git a/src/mcp/shared/experimental/tasks/context.py b/src/mcp/shared/experimental/tasks/context.py index 12d159515c..c7f0da0fcd 100644 --- a/src/mcp/shared/experimental/tasks/context.py +++ b/src/mcp/shared/experimental/tasks/context.py @@ -41,7 +41,7 @@ def __init__(self, task: Task, store: TaskStore): @property def task_id(self) -> str: """The task identifier.""" - return self._task.taskId + return self._task.task_id @property def task(self) -> Task: diff --git a/src/mcp/shared/experimental/tasks/helpers.py b/src/mcp/shared/experimental/tasks/helpers.py index 5c87f9ef87..cfd2629357 100644 --- a/src/mcp/shared/experimental/tasks/helpers.py +++ b/src/mcp/shared/experimental/tasks/helpers.py @@ -125,12 +125,12 @@ def create_task_state( """ now = datetime.now(timezone.utc) return Task( - taskId=task_id or generate_task_id(), + task_id=task_id or generate_task_id(), status=TASK_STATUS_WORKING, - createdAt=now, - lastUpdatedAt=now, + created_at=now, + last_updated_at=now, ttl=metadata.ttl, - pollInterval=500, # Default 500ms poll interval + poll_interval=500, # Default 500ms poll interval ) diff --git a/src/mcp/shared/experimental/tasks/in_memory_task_store.py b/src/mcp/shared/experimental/tasks/in_memory_task_store.py index 7b630ce6e2..e499211088 100644 --- a/src/mcp/shared/experimental/tasks/in_memory_task_store.py +++ b/src/mcp/shared/experimental/tasks/in_memory_task_store.py @@ -79,14 +79,14 @@ async def create_task( task = create_task_state(metadata, task_id) - if task.taskId in self._tasks: - raise ValueError(f"Task with ID {task.taskId} already exists") + if task.task_id in self._tasks: + raise ValueError(f"Task with ID {task.task_id} already exists") stored = StoredTask( task=task, 
expires_at=self._calculate_expiry(metadata.ttl), ) - self._tasks[task.taskId] = stored + self._tasks[task.task_id] = stored # Return a copy to prevent external modification return Task(**task.model_dump()) @@ -124,10 +124,10 @@ async def update_task( status_changed = True if status_message is not None: - stored.task.statusMessage = status_message + stored.task.status_message = status_message - # Update lastUpdatedAt on any change - stored.task.lastUpdatedAt = datetime.now(timezone.utc) + # Update last_updated_at on any change + stored.task.last_updated_at = datetime.now(timezone.utc) # If task is now terminal and has TTL, reset expiry timer if status is not None and is_terminal(status) and stored.task.ttl is not None: diff --git a/src/mcp/shared/experimental/tasks/polling.py b/src/mcp/shared/experimental/tasks/polling.py index 39db2e6b68..18cc262277 100644 --- a/src/mcp/shared/experimental/tasks/polling.py +++ b/src/mcp/shared/experimental/tasks/polling.py @@ -41,5 +41,5 @@ async def poll_until_terminal( if is_terminal(status.status): break - interval_ms = status.pollInterval if status.pollInterval is not None else default_interval_ms + interval_ms = status.poll_interval if status.poll_interval is not None else default_interval_ms await anyio.sleep(interval_ms / 1000) diff --git a/src/mcp/shared/progress.py b/src/mcp/shared/progress.py index a230c58b45..245654d109 100644 --- a/src/mcp/shared/progress.py +++ b/src/mcp/shared/progress.py @@ -48,10 +48,10 @@ def progress( ProgressContext[SendRequestT, SendNotificationT, SendResultT, ReceiveRequestT, ReceiveNotificationT], None, ]: - if ctx.meta is None or ctx.meta.progressToken is None: # pragma: no cover + if ctx.meta is None or ctx.meta.progress_token is None: # pragma: no cover raise ValueError("No progress token provided") - progress_ctx = ProgressContext(ctx.session, ctx.meta.progressToken, total) + progress_ctx = ProgressContext(ctx.session, ctx.meta.progress_token, total) try: yield progress_ctx finally: diff --git a/src/mcp/shared/session.py b/src/mcp/shared/session.py index 51d1f64e02..e0a3bc42c1 100644 --- a/src/mcp/shared/session.py +++ b/src/mcp/shared/session.py @@ -301,7 +301,7 @@ async def send_request( if isinstance(response_or_error, JSONRPCError): raise McpError(response_or_error.error) else: - return result_type.model_validate(response_or_error.result) + return result_type.model_validate(response_or_error.result, by_name=False) finally: self._response_streams.pop(request_id, None) @@ -356,7 +356,8 @@ async def _receive_loop(self) -> None: elif isinstance(message.message.root, JSONRPCRequest): try: validated_request = self._receive_request_type.model_validate( - message.message.root.model_dump(by_alias=True, mode="json", exclude_none=True) + message.message.root.model_dump(by_alias=True, mode="json", exclude_none=True), + by_name=False, ) responder = RequestResponder( request_id=message.message.root.id, @@ -393,17 +394,18 @@ async def _receive_loop(self) -> None: elif isinstance(message.message.root, JSONRPCNotification): try: notification = self._receive_notification_type.model_validate( - message.message.root.model_dump(by_alias=True, mode="json", exclude_none=True) + message.message.root.model_dump(by_alias=True, mode="json", exclude_none=True), + by_name=False, ) # Handle cancellation notifications if isinstance(notification.root, CancelledNotification): - cancelled_id = notification.root.params.requestId + cancelled_id = notification.root.params.request_id if cancelled_id in self._in_flight: # pragma: no branch await 
self._in_flight[cancelled_id].cancel() else: # Handle progress notifications callback if isinstance(notification.root, ProgressNotification): # pragma: no cover - progress_token = notification.root.params.progressToken + progress_token = notification.root.params.progress_token # If there is a progress callback for this token, # call it with the progress information if progress_token in self._progress_callbacks: diff --git a/src/mcp/types.py b/src/mcp/types.py index 6fc551035b..a43461f07a 100644 --- a/src/mcp/types.py +++ b/src/mcp/types.py @@ -5,6 +5,7 @@ from typing import Annotated, Any, Final, Generic, Literal, TypeAlias, TypeVar from pydantic import BaseModel, ConfigDict, Field, FileUrl, RootModel +from pydantic.alias_generators import to_camel LATEST_PROTOCOL_VERSION = "2025-11-25" @@ -31,7 +32,7 @@ class MCPModel(BaseModel): """Base class for all MCP protocol types. Allows extra fields for forward compatibility.""" - model_config = ConfigDict(extra="allow") + model_config = ConfigDict(extra="allow", alias_generator=to_camel, populate_by_name=True) class TaskMetadata(MCPModel): @@ -46,7 +47,7 @@ class TaskMetadata(MCPModel): class RequestParams(MCPModel): class Meta(MCPModel): - progressToken: ProgressToken | None = None + progress_token: ProgressToken | None = None """ If specified, the caller requests out-of-band progress notifications for this request (as represented by notifications/progress). The value of this @@ -123,7 +124,7 @@ class Result(MCPModel): class PaginatedResult(Result): - nextCursor: Cursor | None = None + next_cursor: Cursor | None = None """ An opaque token representing the pagination position after the last returned result. If present, there may be more results available. @@ -228,7 +229,7 @@ class Icon(MCPModel): src: str """URL or data URI for the icon.""" - mimeType: str | None = None + mime_type: str | None = None """Optional MIME type for the icon.""" sizes: list[str] | None = None @@ -246,7 +247,7 @@ class Implementation(BaseMetadata): description: str | None = None """An optional human-readable description of what this implementation does.""" - websiteUrl: str | None = None + website_url: str | None = None """An optional URL of the website for this implementation.""" icons: list[Icon] | None = None @@ -256,7 +257,7 @@ class Implementation(BaseMetadata): class RootsCapability(MCPModel): """Capability for root operations.""" - listChanged: bool | None = None + list_changed: bool | None = None """Whether the client supports notifications for changes to the roots list.""" @@ -331,7 +332,7 @@ class TasksCreateMessageCapability(MCPModel): class TasksSamplingCapability(MCPModel): """Capability for tasks sampling operations.""" - createMessage: TasksCreateMessageCapability | None = None + create_message: TasksCreateMessageCapability | None = None class TasksCreateElicitationCapability(MCPModel): @@ -386,7 +387,7 @@ class ClientCapabilities(MCPModel): class PromptsCapability(MCPModel): """Capability for prompts operations.""" - listChanged: bool | None = None + list_changed: bool | None = None """Whether this server supports notifications for changes to the prompt list.""" @@ -395,14 +396,14 @@ class ResourcesCapability(MCPModel): subscribe: bool | None = None """Whether this server supports subscribing to resource updates.""" - listChanged: bool | None = None + list_changed: bool | None = None """Whether this server supports notifications for changes to the resource list.""" class ToolsCapability(MCPModel): """Capability for tools operations.""" - listChanged: bool 
| None = None + list_changed: bool | None = None """Whether this server supports notifications for changes to the tool list.""" @@ -474,20 +475,20 @@ class RelatedTaskMetadata(MCPModel): Include this in the `_meta` field under the key `io.modelcontextprotocol/related-task`. """ - taskId: str + task_id: str """The task identifier this message is associated with.""" class Task(MCPModel): """Data associated with a task.""" - taskId: str + task_id: str """The task identifier.""" status: TaskStatus """Current task state.""" - statusMessage: str | None = None + status_message: str | None = None """ Optional human-readable message describing the current task state. This can provide context for any status, including: @@ -496,16 +497,16 @@ class Task(MCPModel): - Diagnostic information for "failed" status (e.g., error details, what went wrong) """ - createdAt: datetime # Pydantic will enforce ISO 8601 and re-serialize as a string later + created_at: datetime # Pydantic will enforce ISO 8601 and re-serialize as a string later """ISO 8601 timestamp when the task was created.""" - lastUpdatedAt: datetime + last_updated_at: datetime """ISO 8601 timestamp when the task was last updated.""" ttl: Annotated[int, Field(strict=True)] | None """Actual retention duration from creation in milliseconds, null for unlimited.""" - pollInterval: Annotated[int, Field(strict=True)] | None = None + poll_interval: Annotated[int, Field(strict=True)] | None = None """Suggested polling interval in milliseconds.""" @@ -516,7 +517,7 @@ class CreateTaskResult(Result): class GetTaskRequestParams(RequestParams): - taskId: str + task_id: str """The task identifier to query.""" @@ -533,7 +534,7 @@ class GetTaskResult(Result, Task): class GetTaskPayloadRequestParams(RequestParams): - taskId: str + task_id: str """The task identifier to retrieve results for.""" @@ -553,7 +554,7 @@ class GetTaskPayloadResult(Result): class CancelTaskRequestParams(RequestParams): - taskId: str + task_id: str """The task identifier to cancel.""" @@ -597,10 +598,10 @@ class TaskStatusNotification(Notification[TaskStatusNotificationParams, Literal[ class InitializeRequestParams(RequestParams): """Parameters for the initialize request.""" - protocolVersion: str | int + protocol_version: str | int """The latest version of the Model Context Protocol that the client supports.""" capabilities: ClientCapabilities - clientInfo: Implementation + client_info: Implementation class InitializeRequest(Request[InitializeRequestParams, Literal["initialize"]]): @@ -616,10 +617,10 @@ class InitializeRequest(Request[InitializeRequestParams, Literal["initialize"]]) class InitializeResult(Result): """After receiving an initialize request from the client, the server sends this.""" - protocolVersion: str | int + protocol_version: str | int """The version of the Model Context Protocol that the server wants to use.""" capabilities: ServerCapabilities - serverInfo: Implementation + server_info: Implementation instructions: str | None = None """Instructions describing how to use the server and its features.""" @@ -647,7 +648,7 @@ class PingRequest(Request[RequestParams | None, Literal["ping"]]): class ProgressNotificationParams(NotificationParams): """Parameters for progress notifications.""" - progressToken: ProgressToken + progress_token: ProgressToken """ The progress token which was given in the initial request, used to associate this notification with the request that is proceeding. 
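# [editor's note: illustrative sketch, not part of the patch.] These types.py
# hunks all lean on the MCPModel config added above: `alias_generator=to_camel`
# plus `populate_by_name=True` let Python code use snake_case attributes while
# the JSON wire format keeps the spec's camelCase keys. A minimal, hypothetical
# model showing the round trip (assumes Pydantic >= 2.11, which added the
# `by_name` flag used at the validation call sites in this patch):

from pydantic import BaseModel, ConfigDict
from pydantic.alias_generators import to_camel


class Example(BaseModel):
    model_config = ConfigDict(extra="allow", alias_generator=to_camel, populate_by_name=True)

    progress_token: str | None = None


# snake_case construction works in Python code...
assert Example(progress_token="tok").progress_token == "tok"
# ...serialization still emits the camelCase wire form...
assert Example(progress_token="tok").model_dump(by_alias=True) == {"progressToken": "tok"}
# ...and incoming payloads are validated strictly by alias, as this patch's
# `model_validate(..., by_name=False)` call sites require.
assert Example.model_validate({"progressToken": "tok"}, by_name=False).progress_token == "tok"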
@@ -694,7 +695,7 @@ class Resource(BaseMetadata): """The URI of this resource.""" description: str | None = None """A description of what this resource represents.""" - mimeType: str | None = None + mime_type: str | None = None """The MIME type of this resource, if known.""" size: int | None = None """ @@ -716,14 +717,14 @@ class Resource(BaseMetadata): class ResourceTemplate(BaseMetadata): """A template description for resources available on the server.""" - uriTemplate: str + uri_template: str """ A URI template (according to RFC 6570) that can be used to construct resource URIs. """ description: str | None = None """A human-readable description of what this template is for.""" - mimeType: str | None = None + mime_type: str | None = None """ The MIME type for all resources that match this template. This should only be included if all resources matching this template have the same type. @@ -753,7 +754,7 @@ class ListResourceTemplatesRequest(PaginatedRequest[Literal["resources/templates class ListResourceTemplatesResult(PaginatedResult): """The server's response to a resources/templates/list request from the client.""" - resourceTemplates: list[ResourceTemplate] + resource_templates: list[ResourceTemplate] class ReadResourceRequestParams(RequestParams): @@ -778,7 +779,7 @@ class ResourceContents(MCPModel): uri: str """The URI of this resource.""" - mimeType: str | None = None + mime_type: str | None = None """The MIME type of this resource, if known.""" meta: dict[str, Any] | None = Field(alias="_meta", default=None) """ @@ -956,7 +957,7 @@ class ImageContent(MCPModel): type: Literal["image"] = "image" data: str """The base64-encoded image data.""" - mimeType: str + mime_type: str """ The MIME type of the image. Different providers may support different image types. @@ -975,7 +976,7 @@ class AudioContent(MCPModel): type: Literal["audio"] = "audio" data: str """The base64-encoded audio data.""" - mimeType: str + mime_type: str """ The MIME type of the audio. Different providers may support different audio types. @@ -1027,7 +1028,7 @@ class ToolResultContent(MCPModel): type: Literal["tool_result"] = "tool_result" """Discriminator for tool result content.""" - toolUseId: str + tool_use_id: str """The unique identifier that corresponds to the tool call's id field.""" content: list[ContentBlock] = [] @@ -1036,12 +1037,12 @@ class ToolResultContent(MCPModel): Defaults to empty list if not provided. """ - structuredContent: dict[str, Any] | None = None + structured_content: dict[str, Any] | None = None """ Optional structured tool output that matches the tool's outputSchema (if defined). """ - isError: bool | None = None + is_error: bool | None = None """Whether the tool execution resulted in an error.""" meta: dict[str, Any] | None = Field(alias="_meta", default=None) @@ -1161,29 +1162,29 @@ class ToolAnnotations(MCPModel): title: str | None = None """A human-readable title for the tool.""" - readOnlyHint: bool | None = None + read_only_hint: bool | None = None """ If true, the tool does not modify its environment. Default: false """ - destructiveHint: bool | None = None + destructive_hint: bool | None = None """ If true, the tool may perform destructive updates to its environment. If false, the tool performs only additive updates. 
- (This property is meaningful only when `readOnlyHint == false`) + (This property is meaningful only when `read_only_hint == false`) Default: true """ - idempotentHint: bool | None = None + idempotent_hint: bool | None = None """ If true, calling the tool repeatedly with the same arguments will have no additional effect on its environment. - (This property is meaningful only when `readOnlyHint == false`) + (This property is meaningful only when `read_only_hint == false`) Default: false """ - openWorldHint: bool | None = None + open_world_hint: bool | None = None """ If true, this tool may interact with an "open world" of external entities. If false, the tool's domain of interaction is closed. @@ -1196,7 +1197,7 @@ class ToolAnnotations(MCPModel): class ToolExecution(MCPModel): """Execution-related properties for a tool.""" - taskSupport: TaskExecutionMode | None = None + task_support: TaskExecutionMode | None = None """ Indicates whether this tool supports task-augmented execution. This allows clients to handle long-running operations through polling @@ -1215,12 +1216,12 @@ class Tool(BaseMetadata): description: str | None = None """A human-readable description of the tool.""" - inputSchema: dict[str, Any] + input_schema: dict[str, Any] """A JSON Schema object defining the expected parameters for the tool.""" - outputSchema: dict[str, Any] | None = None + output_schema: dict[str, Any] | None = None """ An optional JSON Schema object defining the structure of the tool's output - returned in the structuredContent field of a CallToolResult. + returned in the structured_content field of a CallToolResult. """ icons: list[Icon] | None = None """An optional list of icons for this tool.""" @@ -1259,9 +1260,9 @@ class CallToolResult(Result): """The server's response to a tool call.""" content: list[ContentBlock] - structuredContent: dict[str, Any] | None = None + structured_content: dict[str, Any] | None = None """An optional JSON object that represents the structured result of the tool call.""" - isError: bool = False + is_error: bool = False class ToolListChangedNotification(Notification[NotificationParams | None, Literal["notifications/tools/list_changed"]]): @@ -1349,21 +1350,21 @@ class ModelPreferences(MCPModel): MAY still use the priorities to select from ambiguous matches. """ - costPriority: float | None = None + cost_priority: float | None = None """ How much to prioritize cost when selecting a model. A value of 0 means cost is not important, while a value of 1 means cost is the most important factor. """ - speedPriority: float | None = None + speed_priority: float | None = None """ How much to prioritize sampling speed (latency) when selecting a model. A value of 0 means speed is not important, while a value of 1 means speed is the most important factor. """ - intelligencePriority: float | None = None + intelligence_priority: float | None = None """ How much to prioritize intelligence and capabilities when selecting a model. A value of 0 means intelligence is not important, while a value of 1 @@ -1392,22 +1393,22 @@ class CreateMessageRequestParams(RequestParams): """Parameters for creating a message.""" messages: list[SamplingMessage] - modelPreferences: ModelPreferences | None = None + model_preferences: ModelPreferences | None = None """ The server's preferences for which model to select. The client MAY ignore these preferences.
""" - systemPrompt: str | None = None + system_prompt: str | None = None """An optional system prompt the server wants to use for sampling.""" - includeContext: IncludeContext | None = None + include_context: IncludeContext | None = None """ A request to include context from one or more MCP servers (including the caller), to be attached to the prompt. """ temperature: float | None = None - maxTokens: int + max_tokens: int """The maximum number of tokens to sample, as requested by the server.""" - stopSequences: list[str] | None = None + stop_sequences: list[str] | None = None metadata: dict[str, Any] | None = None """Optional metadata to pass through to the LLM provider.""" tools: list[Tool] | None = None @@ -1415,7 +1416,7 @@ class CreateMessageRequestParams(RequestParams): Tool definitions for the LLM to use during sampling. Requires clientCapabilities.sampling.tools to be present. """ - toolChoice: ToolChoice | None = None + tool_choice: ToolChoice | None = None """ Controls tool usage behavior. Requires clientCapabilities.sampling.tools and the tools parameter to be present. @@ -1445,7 +1446,7 @@ class CreateMessageResult(Result): """Response content. Single content block (text, image, or audio).""" model: str """The name of the model that generated the message.""" - stopReason: StopReason | None = None + stop_reason: StopReason | None = None """The reason why sampling stopped, if known.""" @@ -1460,11 +1461,11 @@ class CreateMessageResultWithTools(Result): content: SamplingMessageContentBlock | list[SamplingMessageContentBlock] """ Response content. May be a single content block or an array. - May include ToolUseContent if stopReason is 'toolUse'. + May include ToolUseContent if stop_reason is 'toolUse'. """ model: str """The name of the model that generated the message.""" - stopReason: StopReason | None = None + stop_reason: StopReason | None = None """ The reason why sampling stopped, if known. 'toolUse' indicates the model wants to use a tool. @@ -1535,7 +1536,7 @@ class Completion(MCPModel): The total number of completion options available. This can exceed the number of values actually sent in the response. """ - hasMore: bool | None = None + has_more: bool | None = None """ Indicates whether there are additional completion options beyond those provided in the current response, even if the exact total is unknown. @@ -1614,7 +1615,7 @@ class RootsListChangedNotification( class CancelledNotificationParams(NotificationParams): """Parameters for cancellation notifications.""" - requestId: RequestId | None = None + request_id: RequestId | None = None """ The ID of the request to cancel. @@ -1639,7 +1640,7 @@ class CancelledNotification(Notification[CancelledNotificationParams, Literal["n class ElicitCompleteNotificationParams(NotificationParams): """Parameters for elicitation completion notifications.""" - elicitationId: str + elicitation_id: str """The unique identifier of the elicitation that was completed.""" @@ -1716,7 +1717,7 @@ class ElicitRequestFormParams(RequestParams): message: str """The message to present to the user describing what information is being requested.""" - requestedSchema: ElicitRequestedSchema + requested_schema: ElicitRequestedSchema """ A restricted subset of JSON Schema defining the structure of expected response. Only top-level properties are allowed, without nesting. 
@@ -1739,7 +1740,7 @@ class ElicitRequestURLParams(RequestParams): url: str """The URL that the user should navigate to.""" - elicitationId: str + elicitation_id: str """ The ID of the elicitation, which must be unique within the context of the server. The client MUST treat this ID as an opaque value. diff --git a/tests/client/test_http_unicode.py b/tests/client/test_http_unicode.py index ec38f35838..3eee774999 100644 --- a/tests/client/test_http_unicode.py +++ b/tests/client/test_http_unicode.py @@ -61,7 +61,7 @@ async def list_tools() -> list[Tool]: Tool( name="echo_unicode", description="🔤 Echo Unicode text - Hello 👋 World 🌍 - Testing 🧪 Unicode ✨", - inputSchema={ + input_schema={ "type": "object", "properties": { "text": {"type": "string", "description": "Text to echo back"}, diff --git a/tests/client/test_list_roots_callback.py b/tests/client/test_list_roots_callback.py index 0da0fff07a..7b280b5c40 100644 --- a/tests/client/test_list_roots_callback.py +++ b/tests/client/test_list_roots_callback.py @@ -45,7 +45,7 @@ async def test_list_roots(context: Context[ServerSession, None], message: str): async with create_session(server._mcp_server, list_roots_callback=list_roots_callback) as client_session: # Make a request to trigger sampling callback result = await client_session.call_tool("test_list_roots", {"message": "test message"}) - assert result.isError is False + assert result.is_error is False assert isinstance(result.content[0], TextContent) assert result.content[0].text == "true" @@ -53,6 +53,6 @@ async def test_list_roots(context: Context[ServerSession, None], message: str): async with create_session(server._mcp_server) as client_session: # Make a request to trigger sampling callback result = await client_session.call_tool("test_list_roots", {"message": "test message"}) - assert result.isError is True + assert result.is_error is True assert isinstance(result.content[0], TextContent) assert result.content[0].text == "Error executing tool test_list_roots: List roots not supported" diff --git a/tests/client/test_logging_callback.py b/tests/client/test_logging_callback.py index de058eb061..2511e279c8 100644 --- a/tests/client/test_logging_callback.py +++ b/tests/client/test_logging_callback.py @@ -78,7 +78,7 @@ async def message_handler( ) as client_session: # First verify our test tool works result = await client_session.call_tool("test_tool", {}) - assert result.isError is False + assert result.is_error is False assert isinstance(result.content[0], TextContent) assert result.content[0].text == "true" @@ -101,8 +101,8 @@ async def message_handler( "extra_dict": {"a": 1, "b": 2, "c": 3}, }, ) - assert log_result.isError is False - assert log_result_with_extra.isError is False + assert log_result.is_error is False + assert log_result_with_extra.is_error is False assert len(logging_collector.log_messages) == 2 # Create meta object with related_request_id added dynamically log = logging_collector.log_messages[0] diff --git a/tests/client/test_output_schema_validation.py b/tests/client/test_output_schema_validation.py index e4a06b7f82..6e06c99fad 100644 --- a/tests/client/test_output_schema_validation.py +++ b/tests/client/test_output_schema_validation.py @@ -66,8 +66,8 @@ async def list_tools(): Tool( name="get_user", description="Get user data", - inputSchema={"type": "object"}, - outputSchema=output_schema, + input_schema={"type": "object"}, + output_schema=output_schema, ) ] @@ -105,8 +105,8 @@ async def list_tools(): Tool( name="calculate", description="Calculate something", - 
inputSchema={"type": "object"}, - outputSchema=output_schema, + input_schema={"type": "object"}, + output_schema=output_schema, ) ] @@ -136,8 +136,8 @@ async def list_tools(): Tool( name="get_scores", description="Get scores", - inputSchema={"type": "object"}, - outputSchema=output_schema, + input_schema={"type": "object"}, + output_schema=output_schema, ) ] @@ -171,8 +171,8 @@ async def list_tools(): Tool( name="get_person", description="Get person data", - inputSchema={"type": "object"}, - outputSchema=output_schema, + input_schema={"type": "object"}, + output_schema=output_schema, ) ] @@ -190,7 +190,7 @@ async def call_tool(name: str, arguments: dict[str, Any]): @pytest.mark.anyio async def test_tool_not_listed_warning(self, caplog: pytest.LogCaptureFixture): - """Test that client logs warning when tool is not in list_tools but has outputSchema""" + """Test that client logs warning when tool is not in list_tools but has output_schema""" server = Server("test-server") @server.list_tools() @@ -210,8 +210,8 @@ async def call_tool(name: str, arguments: dict[str, Any]) -> dict[str, Any]: async with client_session(server) as client: # Call a tool that wasn't listed result = await client.call_tool("mystery_tool", {}) - assert result.structuredContent == {"result": 42} - assert result.isError is False + assert result.structured_content == {"result": 42} + assert result.is_error is False # Check that warning was logged assert "Tool mystery_tool not listed" in caplog.text diff --git a/tests/client/test_sampling_callback.py b/tests/client/test_sampling_callback.py index 733364a767..b1e483116d 100644 --- a/tests/client/test_sampling_callback.py +++ b/tests/client/test_sampling_callback.py @@ -25,7 +25,7 @@ async def test_sampling_callback(): role="assistant", content=TextContent(type="text", text="This is a response from the sampling callback"), model="test-model", - stopReason="endTurn", + stop_reason="endTurn", ) async def sampling_callback( @@ -47,7 +47,7 @@ async def test_sampling_tool(message: str): async with create_session(server._mcp_server, sampling_callback=sampling_callback) as client_session: # Make a request to trigger sampling callback result = await client_session.call_tool("test_sampling", {"message": "Test message for sampling"}) - assert result.isError is False + assert result.is_error is False assert isinstance(result.content[0], TextContent) assert result.content[0].text == "true" @@ -55,7 +55,7 @@ async def test_sampling_tool(message: str): async with create_session(server._mcp_server) as client_session: # Make a request to trigger sampling callback result = await client_session.call_tool("test_sampling", {"message": "Test message for sampling"}) - assert result.isError is True + assert result.is_error is True assert isinstance(result.content[0], TextContent) assert result.content[0].text == "Error executing tool test_sampling: Sampling not supported" @@ -72,7 +72,7 @@ async def test_create_message_backwards_compat_single_content(): role="assistant", content=TextContent(type="text", text="Hello from LLM"), model="test-model", - stopReason="endTurn", + stop_reason="endTurn", ) async def sampling_callback( @@ -99,7 +99,7 @@ async def test_tool(message: str): async with create_session(server._mcp_server, sampling_callback=sampling_callback) as client_session: result = await client_session.call_tool("test_backwards_compat", {"message": "Test"}) - assert result.isError is False + assert result.is_error is False assert isinstance(result.content[0], TextContent) assert 
result.content[0].text == "true" @@ -112,7 +112,7 @@ async def test_create_message_result_with_tools_type(): role="assistant", content=ToolUseContent(type="tool_use", id="call_123", name="get_weather", input={"city": "SF"}), model="test-model", - stopReason="toolUse", + stop_reason="toolUse", ) # CreateMessageResultWithTools should have content_as_list @@ -128,7 +128,7 @@ async def test_create_message_result_with_tools_type(): ToolUseContent(type="tool_use", id="call_456", name="get_weather", input={"city": "NYC"}), ], model="test-model", - stopReason="toolUse", + stop_reason="toolUse", ) content_list_array = result_array.content_as_list assert len(content_list_array) == 2 diff --git a/tests/client/test_session.py b/tests/client/test_session.py index eb2683fbdb..78df8ed191 100644 --- a/tests/client/test_session.py +++ b/tests/client/test_session.py @@ -49,7 +49,7 @@ async def mock_server(): result = ServerResult( InitializeResult( - protocolVersion=LATEST_PROTOCOL_VERSION, + protocol_version=LATEST_PROTOCOL_VERSION, capabilities=ServerCapabilities( logging=None, resources=None, @@ -57,7 +57,7 @@ async def mock_server(): experimental=None, prompts=None, ), - serverInfo=Implementation(name="mock-server", version="0.1.0"), + server_info=Implementation(name="mock-server", version="0.1.0"), instructions="The server instructions.", ) ) @@ -105,9 +105,9 @@ async def message_handler( # pragma: no cover # Assert the result assert isinstance(result, InitializeResult) - assert result.protocolVersion == LATEST_PROTOCOL_VERSION + assert result.protocol_version == LATEST_PROTOCOL_VERSION assert isinstance(result.capabilities, ServerCapabilities) - assert result.serverInfo == Implementation(name="mock-server", version="0.1.0") + assert result.server_info == Implementation(name="mock-server", version="0.1.0") assert result.instructions == "The server instructions." 
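# [editor's note: illustrative sketch, not part of the patch.] The pattern these
# test hunks exercise end to end: results built with snake_case kwargs travel
# over the wire in camelCase, and `model_validate(..., by_name=False)` on the
# receiving side accepts only that alias form. Names taken from the hunks above:

from mcp.types import LATEST_PROTOCOL_VERSION, Implementation, InitializeResult, ServerCapabilities

init = InitializeResult(
    protocol_version=LATEST_PROTOCOL_VERSION,
    capabilities=ServerCapabilities(),
    server_info=Implementation(name="mock-server", version="0.1.0"),
)
wire = init.model_dump(by_alias=True, mode="json", exclude_none=True)
assert wire["protocolVersion"] == LATEST_PROTOCOL_VERSION
assert wire["serverInfo"]["name"] == "mock-server"
assert InitializeResult.model_validate(wire, by_name=False).server_info.name == "mock-server"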
# Check that the client sent the initialized notification @@ -133,13 +133,13 @@ async def mock_server(): jsonrpc_request.model_dump(by_alias=True, mode="json", exclude_none=True) ) assert isinstance(request.root, InitializeRequest) - received_client_info = request.root.params.clientInfo + received_client_info = request.root.params.client_info result = ServerResult( InitializeResult( - protocolVersion=LATEST_PROTOCOL_VERSION, + protocol_version=LATEST_PROTOCOL_VERSION, capabilities=ServerCapabilities(), - serverInfo=Implementation(name="mock-server", version="0.1.0"), + server_info=Implementation(name="mock-server", version="0.1.0"), ) ) @@ -194,13 +194,13 @@ async def mock_server(): jsonrpc_request.model_dump(by_alias=True, mode="json", exclude_none=True) ) assert isinstance(request.root, InitializeRequest) - received_client_info = request.root.params.clientInfo + received_client_info = request.root.params.client_info result = ServerResult( InitializeResult( - protocolVersion=LATEST_PROTOCOL_VERSION, + protocol_version=LATEST_PROTOCOL_VERSION, capabilities=ServerCapabilities(), - serverInfo=Implementation(name="mock-server", version="0.1.0"), + server_info=Implementation(name="mock-server", version="0.1.0"), ) ) @@ -254,14 +254,14 @@ async def mock_server(): assert isinstance(request.root, InitializeRequest) # Verify client sent the latest protocol version - assert request.root.params.protocolVersion == LATEST_PROTOCOL_VERSION + assert request.root.params.protocol_version == LATEST_PROTOCOL_VERSION # Server responds with a supported older version result = ServerResult( InitializeResult( - protocolVersion="2024-11-05", + protocol_version="2024-11-05", capabilities=ServerCapabilities(), - serverInfo=Implementation(name="mock-server", version="0.1.0"), + server_info=Implementation(name="mock-server", version="0.1.0"), ) ) @@ -296,8 +296,8 @@ async def mock_server(): # Assert the result with negotiated version assert isinstance(result, InitializeResult) - assert result.protocolVersion == "2024-11-05" - assert result.protocolVersion in SUPPORTED_PROTOCOL_VERSIONS + assert result.protocol_version == "2024-11-05" + assert result.protocol_version in SUPPORTED_PROTOCOL_VERSIONS @pytest.mark.anyio @@ -318,9 +318,9 @@ async def mock_server(): # Server responds with an unsupported version result = ServerResult( InitializeResult( - protocolVersion="2020-01-01", # Unsupported old version + protocol_version="2020-01-01", # Unsupported old version capabilities=ServerCapabilities(), - serverInfo=Implementation(name="mock-server", version="0.1.0"), + server_info=Implementation(name="mock-server", version="0.1.0"), ) ) @@ -377,9 +377,9 @@ async def mock_server(): result = ServerResult( InitializeResult( - protocolVersion=LATEST_PROTOCOL_VERSION, + protocol_version=LATEST_PROTOCOL_VERSION, capabilities=ServerCapabilities(), - serverInfo=Implementation(name="mock-server", version="0.1.0"), + server_info=Implementation(name="mock-server", version="0.1.0"), ) ) @@ -455,9 +455,9 @@ async def mock_server(): result = ServerResult( InitializeResult( - protocolVersion=LATEST_PROTOCOL_VERSION, + protocol_version=LATEST_PROTOCOL_VERSION, capabilities=ServerCapabilities(), - serverInfo=Implementation(name="mock-server", version="0.1.0"), + server_info=Implementation(name="mock-server", version="0.1.0"), ) ) @@ -503,7 +503,7 @@ async def mock_server(): assert received_capabilities.roots is not None assert isinstance(received_capabilities.roots, types.RootsCapability) # Should be True for custom callback - assert 
received_capabilities.roots.listChanged is True + assert received_capabilities.roots.list_changed is True @pytest.mark.anyio @@ -538,9 +538,9 @@ async def mock_server(): result = ServerResult( InitializeResult( - protocolVersion=LATEST_PROTOCOL_VERSION, + protocol_version=LATEST_PROTOCOL_VERSION, capabilities=ServerCapabilities(), - serverInfo=Implementation(name="mock-server", version="0.1.0"), + server_info=Implementation(name="mock-server", version="0.1.0"), ) ) @@ -592,9 +592,9 @@ async def test_get_server_capabilities(): expected_capabilities = ServerCapabilities( logging=types.LoggingCapability(), - prompts=types.PromptsCapability(listChanged=True), - resources=types.ResourcesCapability(subscribe=True, listChanged=True), - tools=types.ToolsCapability(listChanged=False), + prompts=types.PromptsCapability(list_changed=True), + resources=types.ResourcesCapability(subscribe=True, list_changed=True), + tools=types.ToolsCapability(list_changed=False), ) async def mock_server(): @@ -608,9 +608,9 @@ async def mock_server(): result = ServerResult( InitializeResult( - protocolVersion=LATEST_PROTOCOL_VERSION, + protocol_version=LATEST_PROTOCOL_VERSION, capabilities=expected_capabilities, - serverInfo=Implementation(name="mock-server", version="0.1.0"), + server_info=Implementation(name="mock-server", version="0.1.0"), ) ) @@ -649,11 +649,11 @@ async def mock_server(): assert capabilities == expected_capabilities assert capabilities.logging is not None assert capabilities.prompts is not None - assert capabilities.prompts.listChanged is True + assert capabilities.prompts.list_changed is True assert capabilities.resources is not None assert capabilities.resources.subscribe is True assert capabilities.tools is not None - assert capabilities.tools.listChanged is False + assert capabilities.tools.list_changed is False @pytest.mark.anyio @@ -663,7 +663,7 @@ async def test_client_tool_call_with_meta(meta: dict[str, Any] | None): client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage](1) server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[SessionMessage](1) - mocked_tool = types.Tool(name="sample_tool", inputSchema={}) + mocked_tool = types.Tool(name="sample_tool", input_schema={}) async def mock_server(): # Receive initialization request from client @@ -677,9 +677,9 @@ async def mock_server(): result = ServerResult( InitializeResult( - protocolVersion=LATEST_PROTOCOL_VERSION, + protocol_version=LATEST_PROTOCOL_VERSION, capabilities=ServerCapabilities(), - serverInfo=Implementation(name="mock-server", version="0.1.0"), + server_info=Implementation(name="mock-server", version="0.1.0"), ) ) @@ -712,7 +712,7 @@ async def mock_server(): assert jsonrpc_request.root.params["_meta"] == meta result = ServerResult( - CallToolResult(content=[TextContent(type="text", text="Called successfully")], isError=False) + CallToolResult(content=[TextContent(type="text", text="Called successfully")], is_error=False) ) # Send the tools/call result diff --git a/tests/client/test_session_group.py b/tests/client/test_session_group.py index ed07293aed..22194c5eda 100644 --- a/tests/client/test_session_group.py +++ b/tests/client/test_session_group.py @@ -59,7 +59,7 @@ def hook(name: str, server_info: types.Implementation) -> str: # pragma: no cov return f"{(server_info.name)}-{name}" mcp_session_group = ClientSessionGroup(component_name_hook=hook) - mcp_session_group._tools = {"server1-my_tool": types.Tool(name="my_tool", inputSchema={})} + 
mcp_session_group._tools = {"server1-my_tool": types.Tool(name="my_tool", input_schema={})} mcp_session_group._tool_to_session = {"server1-my_tool": mock_session} text_content = types.TextContent(type="text", text="OK") mock_session.call_tool.return_value = types.CallToolResult(content=[text_content]) @@ -324,7 +324,7 @@ async def test_establish_session_parameterized( # Mock session.initialize() mock_initialize_result = mock.AsyncMock(name="InitializeResult") - mock_initialize_result.serverInfo = types.Implementation(name="foo", version="1") + mock_initialize_result.server_info = types.Implementation(name="foo", version="1") mock_entered_session.initialize.return_value = mock_initialize_result # --- Test Execution --- @@ -381,5 +381,5 @@ async def test_establish_session_parameterized( mock_entered_session.initialize.assert_awaited_once() # 3. Assert returned values - assert returned_server_info is mock_initialize_result.serverInfo + assert returned_server_info is mock_initialize_result.server_info assert returned_session is mock_entered_session diff --git a/tests/experimental/tasks/client/test_capabilities.py b/tests/experimental/tasks/client/test_capabilities.py index f2def4e3a6..de73b8c062 100644 --- a/tests/experimental/tasks/client/test_capabilities.py +++ b/tests/experimental/tasks/client/test_capabilities.py @@ -45,9 +45,9 @@ async def mock_server(): result = ServerResult( InitializeResult( - protocolVersion=LATEST_PROTOCOL_VERSION, + protocol_version=LATEST_PROTOCOL_VERSION, capabilities=ServerCapabilities(), - serverInfo=Implementation(name="mock-server", version="0.1.0"), + server_info=Implementation(name="mock-server", version="0.1.0"), ) ) @@ -119,9 +119,9 @@ async def mock_server(): result = ServerResult( InitializeResult( - protocolVersion=LATEST_PROTOCOL_VERSION, + protocol_version=LATEST_PROTOCOL_VERSION, capabilities=ServerCapabilities(), - serverInfo=Implementation(name="mock-server", version="0.1.0"), + server_info=Implementation(name="mock-server", version="0.1.0"), ) ) @@ -203,9 +203,9 @@ async def mock_server(): result = ServerResult( InitializeResult( - protocolVersion=LATEST_PROTOCOL_VERSION, + protocol_version=LATEST_PROTOCOL_VERSION, capabilities=ServerCapabilities(), - serverInfo=Implementation(name="mock-server", version="0.1.0"), + server_info=Implementation(name="mock-server", version="0.1.0"), ) ) @@ -283,9 +283,9 @@ async def mock_server(): result = ServerResult( InitializeResult( - protocolVersion=LATEST_PROTOCOL_VERSION, + protocol_version=LATEST_PROTOCOL_VERSION, capabilities=ServerCapabilities(), - serverInfo=Implementation(name="mock-server", version="0.1.0"), + server_info=Implementation(name="mock-server", version="0.1.0"), ) ) diff --git a/tests/experimental/tasks/client/test_handlers.py b/tests/experimental/tasks/client/test_handlers.py index 86cea42ae1..0e4e8f45a5 100644 --- a/tests/experimental/tasks/client/test_handlers.py +++ b/tests/experimental/tasks/client/test_handlers.py @@ -117,17 +117,17 @@ async def get_task_handler( params: GetTaskRequestParams, ) -> GetTaskResult | ErrorData: nonlocal received_task_id - received_task_id = params.taskId - task = await store.get_task(params.taskId) - assert task is not None, f"Test setup error: task {params.taskId} should exist" + received_task_id = params.task_id + task = await store.get_task(params.task_id) + assert task is not None, f"Test setup error: task {params.task_id} should exist" return GetTaskResult( - taskId=task.taskId, + task_id=task.task_id, status=task.status, - 
statusMessage=task.statusMessage, - createdAt=task.createdAt, - lastUpdatedAt=task.lastUpdatedAt, + status_message=task.status_message, + created_at=task.created_at, + last_updated_at=task.last_updated_at, ttl=task.ttl, - pollInterval=task.pollInterval, + poll_interval=task.poll_interval, ) await store.create_task(TaskMetadata(ttl=60000), task_id="test-task-123") @@ -150,7 +150,7 @@ async def run_client() -> None: tg.start_soon(run_client) await client_ready.wait() - typed_request = GetTaskRequest(params=GetTaskRequestParams(taskId="test-task-123")) + typed_request = GetTaskRequest(params=GetTaskRequestParams(task_id="test-task-123")) request = types.JSONRPCRequest( jsonrpc="2.0", id="req-1", @@ -164,7 +164,7 @@ async def run_client() -> None: assert response.id == "req-1" result = GetTaskResult.model_validate(response.result) - assert result.taskId == "test-task-123" + assert result.task_id == "test-task-123" assert result.status == "working" assert received_task_id == "test-task-123" @@ -183,8 +183,8 @@ async def get_task_result_handler( context: RequestContext[ClientSession, None], params: GetTaskPayloadRequestParams, ) -> GetTaskPayloadResult | ErrorData: - result = await store.get_result(params.taskId) - assert result is not None, f"Test setup error: result for {params.taskId} should exist" + result = await store.get_result(params.task_id) + assert result is not None, f"Test setup error: result for {params.task_id} should exist" assert isinstance(result, types.CallToolResult) return GetTaskPayloadResult(**result.model_dump()) @@ -213,7 +213,7 @@ async def run_client() -> None: tg.start_soon(run_client) await client_ready.wait() - typed_request = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(taskId="test-task-456")) + typed_request = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id="test-task-456")) request = types.JSONRPCRequest( jsonrpc="2.0", id="req-2", @@ -248,7 +248,7 @@ async def list_tasks_handler( ) -> ListTasksResult | ErrorData: cursor = params.cursor if params else None tasks_list, next_cursor = await store.list_tasks(cursor=cursor) - return ListTasksResult(tasks=tasks_list, nextCursor=next_cursor) + return ListTasksResult(tasks=tasks_list, next_cursor=next_cursor) await store.create_task(TaskMetadata(ttl=60000), task_id="task-1") await store.create_task(TaskMetadata(ttl=60000), task_id="task-2") @@ -301,16 +301,16 @@ async def cancel_task_handler( context: RequestContext[ClientSession, None], params: CancelTaskRequestParams, ) -> CancelTaskResult | ErrorData: - task = await store.get_task(params.taskId) - assert task is not None, f"Test setup error: task {params.taskId} should exist" - await store.update_task(params.taskId, status="cancelled") - updated = await store.get_task(params.taskId) + task = await store.get_task(params.task_id) + assert task is not None, f"Test setup error: task {params.task_id} should exist" + await store.update_task(params.task_id, status="cancelled") + updated = await store.get_task(params.task_id) assert updated is not None return CancelTaskResult( - taskId=updated.taskId, + task_id=updated.task_id, status=updated.status, - createdAt=updated.createdAt, - lastUpdatedAt=updated.lastUpdatedAt, + created_at=updated.created_at, + last_updated_at=updated.last_updated_at, ttl=updated.ttl, ) @@ -334,7 +334,7 @@ async def run_client() -> None: tg.start_soon(run_client) await client_ready.wait() - typed_request = CancelTaskRequest(params=CancelTaskRequestParams(taskId="task-to-cancel")) + typed_request = 
CancelTaskRequest(params=CancelTaskRequestParams(task_id="task-to-cancel")) request = types.JSONRPCRequest( jsonrpc="2.0", id="req-4", @@ -347,7 +347,7 @@ async def run_client() -> None: assert isinstance(response, types.JSONRPCResponse) result = CancelTaskResult.model_validate(response.result) - assert result.taskId == "task-to-cancel" + assert result.task_id == "task-to-cancel" assert result.status == "cancelled" tg.cancel_scope.cancel() @@ -370,17 +370,17 @@ async def task_augmented_sampling_callback( task_metadata: TaskMetadata, ) -> CreateTaskResult: task = await store.create_task(task_metadata) - created_task_id[0] = task.taskId + created_task_id[0] = task.task_id async def do_sampling() -> None: result = CreateMessageResult( role="assistant", content=TextContent(type="text", text="Sampled response"), model="test-model", - stopReason="endTurn", + stop_reason="endTurn", ) - await store.store_result(task.taskId, result) - await store.update_task(task.taskId, status="completed") + await store.store_result(task.task_id, result) + await store.update_task(task.task_id, status="completed") sampling_completed.set() assert background_tg[0] is not None @@ -391,24 +391,24 @@ async def get_task_handler( context: RequestContext[ClientSession, None], params: GetTaskRequestParams, ) -> GetTaskResult | ErrorData: - task = await store.get_task(params.taskId) - assert task is not None, f"Test setup error: task {params.taskId} should exist" + task = await store.get_task(params.task_id) + assert task is not None, f"Test setup error: task {params.task_id} should exist" return GetTaskResult( - taskId=task.taskId, + task_id=task.task_id, status=task.status, - statusMessage=task.statusMessage, - createdAt=task.createdAt, - lastUpdatedAt=task.lastUpdatedAt, + status_message=task.status_message, + created_at=task.created_at, + last_updated_at=task.last_updated_at, ttl=task.ttl, - pollInterval=task.pollInterval, + poll_interval=task.poll_interval, ) async def get_task_result_handler( context: RequestContext[ClientSession, None], params: GetTaskPayloadRequestParams, ) -> GetTaskPayloadResult | ErrorData: - result = await store.get_result(params.taskId) - assert result is not None, f"Test setup error: result for {params.taskId} should exist" + result = await store.get_result(params.task_id) + assert result is not None, f"Test setup error: result for {params.task_id} should exist" assert isinstance(result, CreateMessageResult) return GetTaskPayloadResult(**result.model_dump()) @@ -439,7 +439,7 @@ async def run_client() -> None: typed_request = CreateMessageRequest( params=CreateMessageRequestParams( messages=[SamplingMessage(role="user", content=TextContent(type="text", text="Hello"))], - maxTokens=100, + max_tokens=100, task=TaskMetadata(ttl=60000), ) ) @@ -456,14 +456,14 @@ async def run_client() -> None: assert isinstance(response, types.JSONRPCResponse) task_result = CreateTaskResult.model_validate(response.result) - task_id = task_result.task.taskId + task_id = task_result.task.task_id assert task_id == created_task_id[0] # Step 3: Wait for background sampling await sampling_completed.wait() # Step 4: Server polls task status - typed_poll = GetTaskRequest(params=GetTaskRequestParams(taskId=task_id)) + typed_poll = GetTaskRequest(params=GetTaskRequestParams(task_id=task_id)) poll_request = types.JSONRPCRequest( jsonrpc="2.0", id="req-poll", @@ -479,7 +479,7 @@ async def run_client() -> None: assert status.status == "completed" # Step 5: Server gets result - typed_result_req = 
GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(taskId=task_id)) + typed_result_req = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id=task_id)) result_request = types.JSONRPCRequest( jsonrpc="2.0", id="req-result", @@ -514,13 +514,13 @@ async def task_augmented_elicitation_callback( task_metadata: TaskMetadata, ) -> CreateTaskResult | ErrorData: task = await store.create_task(task_metadata) - created_task_id[0] = task.taskId + created_task_id[0] = task.task_id async def do_elicitation() -> None: # Simulate user providing elicitation response result = ElicitResult(action="accept", content={"name": "Test User"}) - await store.store_result(task.taskId, result) - await store.update_task(task.taskId, status="completed") + await store.store_result(task.task_id, result) + await store.update_task(task.task_id, status="completed") elicitation_completed.set() assert background_tg[0] is not None @@ -531,24 +531,24 @@ async def get_task_handler( context: RequestContext[ClientSession, None], params: GetTaskRequestParams, ) -> GetTaskResult | ErrorData: - task = await store.get_task(params.taskId) - assert task is not None, f"Test setup error: task {params.taskId} should exist" + task = await store.get_task(params.task_id) + assert task is not None, f"Test setup error: task {params.task_id} should exist" return GetTaskResult( - taskId=task.taskId, + task_id=task.task_id, status=task.status, - statusMessage=task.statusMessage, - createdAt=task.createdAt, - lastUpdatedAt=task.lastUpdatedAt, + status_message=task.status_message, + created_at=task.created_at, + last_updated_at=task.last_updated_at, ttl=task.ttl, - pollInterval=task.pollInterval, + poll_interval=task.poll_interval, ) async def get_task_result_handler( context: RequestContext[ClientSession, None], params: GetTaskPayloadRequestParams, ) -> GetTaskPayloadResult | ErrorData: - result = await store.get_result(params.taskId) - assert result is not None, f"Test setup error: result for {params.taskId} should exist" + result = await store.get_result(params.task_id) + assert result is not None, f"Test setup error: result for {params.task_id} should exist" assert isinstance(result, ElicitResult) return GetTaskPayloadResult(**result.model_dump()) @@ -579,7 +579,7 @@ async def run_client() -> None: typed_request = ElicitRequest( params=ElicitRequestFormParams( message="What is your name?", - requestedSchema={"type": "object", "properties": {"name": {"type": "string"}}}, + requested_schema={"type": "object", "properties": {"name": {"type": "string"}}}, task=TaskMetadata(ttl=60000), ) ) @@ -596,14 +596,14 @@ async def run_client() -> None: assert isinstance(response, types.JSONRPCResponse) task_result = CreateTaskResult.model_validate(response.result) - task_id = task_result.task.taskId + task_id = task_result.task.task_id assert task_id == created_task_id[0] # Step 3: Wait for background elicitation await elicitation_completed.wait() # Step 4: Server polls task status - typed_poll = GetTaskRequest(params=GetTaskRequestParams(taskId=task_id)) + typed_poll = GetTaskRequest(params=GetTaskRequestParams(task_id=task_id)) poll_request = types.JSONRPCRequest( jsonrpc="2.0", id="req-poll", @@ -619,7 +619,7 @@ async def run_client() -> None: assert status.status == "completed" # Step 5: Server gets result - typed_result_req = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(taskId=task_id)) + typed_result_req = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id=task_id)) result_request = types.JSONRPCRequest( 
jsonrpc="2.0", id="req-result", @@ -661,7 +661,7 @@ async def run_client() -> None: tg.start_soon(run_client) await client_ready.wait() - typed_request = GetTaskRequest(params=GetTaskRequestParams(taskId="nonexistent")) + typed_request = GetTaskRequest(params=GetTaskRequestParams(task_id="nonexistent")) request = types.JSONRPCRequest( jsonrpc="2.0", id="req-unhandled", @@ -700,7 +700,7 @@ async def run_client() -> None: tg.start_soon(run_client) await client_ready.wait() - typed_request = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(taskId="nonexistent")) + typed_request = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id="nonexistent")) request = types.JSONRPCRequest( jsonrpc="2.0", id="req-result", @@ -772,7 +772,7 @@ async def run_client() -> None: tg.start_soon(run_client) await client_ready.wait() - typed_request = CancelTaskRequest(params=CancelTaskRequestParams(taskId="nonexistent")) + typed_request = CancelTaskRequest(params=CancelTaskRequestParams(task_id="nonexistent")) request = types.JSONRPCRequest( jsonrpc="2.0", id="req-cancel", @@ -813,7 +813,7 @@ async def run_client() -> None: typed_request = CreateMessageRequest( params=CreateMessageRequestParams( messages=[SamplingMessage(role="user", content=TextContent(type="text", text="Hello"))], - maxTokens=100, + max_tokens=100, task=TaskMetadata(ttl=60000), ) ) @@ -859,7 +859,7 @@ async def run_client() -> None: typed_request = ElicitRequest( params=ElicitRequestFormParams( message="What is your name?", - requestedSchema={"type": "object", "properties": {"name": {"type": "string"}}}, + requested_schema={"type": "object", "properties": {"name": {"type": "string"}}}, task=TaskMetadata(ttl=60000), ) ) diff --git a/tests/experimental/tasks/client/test_poll_task.py b/tests/experimental/tasks/client/test_poll_task.py index 8275dc668e..5e3158d955 100644 --- a/tests/experimental/tasks/client/test_poll_task.py +++ b/tests/experimental/tasks/client/test_poll_task.py @@ -20,13 +20,13 @@ def make_task_result( """Create GetTaskResult with sensible defaults.""" now = datetime.now(timezone.utc) return GetTaskResult( - taskId=task_id, + task_id=task_id, status=status, - statusMessage=status_message, - createdAt=now, - lastUpdatedAt=now, + status_message=status_message, + created_at=now, + last_updated_at=now, ttl=60000, - pollInterval=poll_interval, + poll_interval=poll_interval, ) @@ -117,5 +117,5 @@ async def mock_get_task(task_id: str) -> GetTaskResult: assert len(results) == 1 assert results[0].status == "completed" - assert results[0].statusMessage == "All done!" - assert results[0].taskId == "test-task" + assert results[0].status_message == "All done!" 
+ assert results[0].task_id == "test-task" diff --git a/tests/experimental/tasks/client/test_tasks.py b/tests/experimental/tasks/client/test_tasks.py index 24c8891def..3c19d82d0d 100644 --- a/tests/experimental/tasks/client/test_tasks.py +++ b/tests/experimental/tasks/client/test_tasks.py @@ -58,7 +58,7 @@ async def test_session_experimental_get_task() -> None: @server.list_tools() async def list_tools(): - return [Tool(name="test_tool", description="Test", inputSchema={"type": "object"})] + return [Tool(name="test_tool", description="Test", input_schema={"type": "object"})] @server.call_tool() async def handle_call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent] | CreateTaskResult: @@ -70,10 +70,10 @@ async def handle_call_tool(name: str, arguments: dict[str, Any]) -> list[TextCon task = await app.store.create_task(task_metadata) done_event = Event() - app.task_done_events[task.taskId] = done_event + app.task_done_events[task.task_id] = done_event async def do_work(): - async with task_execution(task.taskId, app.store) as task_ctx: + async with task_execution(task.task_id, app.store) as task_ctx: await task_ctx.complete(CallToolResult(content=[TextContent(type="text", text="Done")])) done_event.set() @@ -85,16 +85,16 @@ async def do_work(): @server.experimental.get_task() async def handle_get_task(request: GetTaskRequest) -> GetTaskResult: app = server.request_context.lifespan_context - task = await app.store.get_task(request.params.taskId) - assert task is not None, f"Test setup error: task {request.params.taskId} should exist" + task = await app.store.get_task(request.params.task_id) + assert task is not None, f"Test setup error: task {request.params.task_id} should exist" return GetTaskResult( - taskId=task.taskId, + task_id=task.task_id, status=task.status, - statusMessage=task.statusMessage, - createdAt=task.createdAt, - lastUpdatedAt=task.lastUpdatedAt, + status_message=task.status_message, + created_at=task.created_at, + last_updated_at=task.last_updated_at, ttl=task.ttl, - pollInterval=task.pollInterval, + poll_interval=task.poll_interval, ) # Set up streams @@ -145,7 +145,7 @@ async def run_server(app_context: AppContext): ), CreateTaskResult, ) - task_id = create_result.task.taskId + task_id = create_result.task.task_id # Wait for task to complete await app_context.task_done_events[task_id].wait() @@ -153,7 +153,7 @@ async def run_server(app_context: AppContext): # Use session.experimental to get task status task_status = await client_session.experimental.get_task(task_id) - assert task_status.taskId == task_id + assert task_status.task_id == task_id assert task_status.status == "completed" tg.cancel_scope.cancel() @@ -167,7 +167,7 @@ async def test_session_experimental_get_task_result() -> None: @server.list_tools() async def list_tools(): - return [Tool(name="test_tool", description="Test", inputSchema={"type": "object"})] + return [Tool(name="test_tool", description="Test", input_schema={"type": "object"})] @server.call_tool() async def handle_call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent] | CreateTaskResult: @@ -179,10 +179,10 @@ async def handle_call_tool(name: str, arguments: dict[str, Any]) -> list[TextCon task = await app.store.create_task(task_metadata) done_event = Event() - app.task_done_events[task.taskId] = done_event + app.task_done_events[task.task_id] = done_event async def do_work(): - async with task_execution(task.taskId, app.store) as task_ctx: + async with task_execution(task.task_id, app.store) as task_ctx: await 
task_ctx.complete( CallToolResult(content=[TextContent(type="text", text="Task result content")]) ) @@ -198,8 +198,8 @@ async def handle_get_task_result( request: GetTaskPayloadRequest, ) -> GetTaskPayloadResult: app = server.request_context.lifespan_context - result = await app.store.get_result(request.params.taskId) - assert result is not None, f"Test setup error: result for {request.params.taskId} should exist" + result = await app.store.get_result(request.params.task_id) + assert result is not None, f"Test setup error: result for {request.params.task_id} should exist" assert isinstance(result, CallToolResult) return GetTaskPayloadResult(**result.model_dump()) @@ -251,7 +251,7 @@ async def run_server(app_context: AppContext): ), CreateTaskResult, ) - task_id = create_result.task.taskId + task_id = create_result.task.task_id # Wait for task to complete await app_context.task_done_events[task_id].wait() @@ -275,7 +275,7 @@ async def test_session_experimental_list_tasks() -> None: @server.list_tools() async def list_tools(): - return [Tool(name="test_tool", description="Test", inputSchema={"type": "object"})] + return [Tool(name="test_tool", description="Test", input_schema={"type": "object"})] @server.call_tool() async def handle_call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent] | CreateTaskResult: @@ -287,10 +287,10 @@ async def handle_call_tool(name: str, arguments: dict[str, Any]) -> list[TextCon task = await app.store.create_task(task_metadata) done_event = Event() - app.task_done_events[task.taskId] = done_event + app.task_done_events[task.task_id] = done_event async def do_work(): - async with task_execution(task.taskId, app.store) as task_ctx: + async with task_execution(task.task_id, app.store) as task_ctx: await task_ctx.complete(CallToolResult(content=[TextContent(type="text", text="Done")])) done_event.set() @@ -303,7 +303,7 @@ async def do_work(): async def handle_list_tasks(request: ListTasksRequest) -> ListTasksResult: app = server.request_context.lifespan_context tasks_list, next_cursor = await app.store.list_tasks(cursor=request.params.cursor if request.params else None) - return ListTasksResult(tasks=tasks_list, nextCursor=next_cursor) + return ListTasksResult(tasks=tasks_list, next_cursor=next_cursor) # Set up streams server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[SessionMessage](10) @@ -354,7 +354,7 @@ async def run_server(app_context: AppContext): ), CreateTaskResult, ) - await app_context.task_done_events[create_result.task.taskId].wait() + await app_context.task_done_events[create_result.task.task_id].wait() # Use TaskClient to list tasks list_result = await client_session.experimental.list_tasks() @@ -372,7 +372,7 @@ async def test_session_experimental_cancel_task() -> None: @server.list_tools() async def list_tools(): - return [Tool(name="test_tool", description="Test", inputSchema={"type": "object"})] + return [Tool(name="test_tool", description="Test", input_schema={"type": "object"})] @server.call_tool() async def handle_call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent] | CreateTaskResult: @@ -390,32 +390,32 @@ async def handle_call_tool(name: str, arguments: dict[str, Any]) -> list[TextCon @server.experimental.get_task() async def handle_get_task(request: GetTaskRequest) -> GetTaskResult: app = server.request_context.lifespan_context - task = await app.store.get_task(request.params.taskId) - assert task is not None, f"Test setup error: task {request.params.taskId} should exist" + task = 
await app.store.get_task(request.params.task_id) + assert task is not None, f"Test setup error: task {request.params.task_id} should exist" return GetTaskResult( - taskId=task.taskId, + task_id=task.task_id, status=task.status, - statusMessage=task.statusMessage, - createdAt=task.createdAt, - lastUpdatedAt=task.lastUpdatedAt, + status_message=task.status_message, + created_at=task.created_at, + last_updated_at=task.last_updated_at, ttl=task.ttl, - pollInterval=task.pollInterval, + poll_interval=task.poll_interval, ) @server.experimental.cancel_task() async def handle_cancel_task(request: CancelTaskRequest) -> CancelTaskResult: app = server.request_context.lifespan_context - task = await app.store.get_task(request.params.taskId) - assert task is not None, f"Test setup error: task {request.params.taskId} should exist" - await app.store.update_task(request.params.taskId, status="cancelled") + task = await app.store.get_task(request.params.task_id) + assert task is not None, f"Test setup error: task {request.params.task_id} should exist" + await app.store.update_task(request.params.task_id, status="cancelled") # CancelTaskResult extends Task, so we need to return the updated task info - updated_task = await app.store.get_task(request.params.taskId) + updated_task = await app.store.get_task(request.params.task_id) assert updated_task is not None return CancelTaskResult( - taskId=updated_task.taskId, + task_id=updated_task.task_id, status=updated_task.status, - createdAt=updated_task.createdAt, - lastUpdatedAt=updated_task.lastUpdatedAt, + created_at=updated_task.created_at, + last_updated_at=updated_task.last_updated_at, ttl=updated_task.ttl, ) @@ -467,7 +467,7 @@ async def run_server(app_context: AppContext): ), CreateTaskResult, ) - task_id = create_result.task.taskId + task_id = create_result.task.task_id # Verify task is working status_before = await client_session.experimental.get_task(task_id) diff --git a/tests/experimental/tasks/server/test_context.py b/tests/experimental/tasks/server/test_context.py index 2f09ff1540..a0f1a190d2 100644 --- a/tests/experimental/tasks/server/test_context.py +++ b/tests/experimental/tasks/server/test_context.py @@ -15,8 +15,8 @@ async def test_task_context_properties() -> None: task = await store.create_task(metadata=TaskMetadata(ttl=60000)) ctx = TaskContext(task, store) - assert ctx.task_id == task.taskId - assert ctx.task.taskId == task.taskId + assert ctx.task_id == task.task_id + assert ctx.task.task_id == task.task_id assert ctx.task.status == "working" assert ctx.is_cancelled is False @@ -33,9 +33,9 @@ async def test_task_context_update_status() -> None: await ctx.update_status("Processing step 1...") # Check status message was updated - updated = await store.get_task(task.taskId) + updated = await store.get_task(task.task_id) assert updated is not None - assert updated.statusMessage == "Processing step 1..." + assert updated.status_message == "Processing step 1..." 
store.cleanup() @@ -51,12 +51,12 @@ async def test_task_context_complete() -> None: await ctx.complete(result) # Check task status - updated = await store.get_task(task.taskId) + updated = await store.get_task(task.task_id) assert updated is not None assert updated.status == "completed" # Check result is stored - stored_result = await store.get_result(task.taskId) + stored_result = await store.get_result(task.task_id) assert stored_result is not None store.cleanup() @@ -72,10 +72,10 @@ async def test_task_context_fail() -> None: await ctx.fail("Something went wrong!") # Check task status - updated = await store.get_task(task.taskId) + updated = await store.get_task(task.task_id) assert updated is not None assert updated.status == "failed" - assert updated.statusMessage == "Something went wrong!" + assert updated.status_message == "Something went wrong!" store.cleanup() @@ -101,13 +101,13 @@ def test_create_task_state_generates_id() -> None: task1 = create_task_state(TaskMetadata(ttl=60000)) task2 = create_task_state(TaskMetadata(ttl=60000)) - assert task1.taskId != task2.taskId + assert task1.task_id != task2.task_id def test_create_task_state_uses_provided_id() -> None: """create_task_state uses the provided task ID.""" task = create_task_state(TaskMetadata(ttl=60000), task_id="my-task-123") - assert task.taskId == "my-task-123" + assert task.task_id == "my-task-123" def test_create_task_state_null_ttl() -> None: @@ -119,7 +119,7 @@ def test_create_task_state_null_ttl() -> None: def test_create_task_state_has_created_at() -> None: """create_task_state sets createdAt timestamp.""" task = create_task_state(TaskMetadata(ttl=60000)) - assert task.createdAt is not None + assert task.created_at is not None @pytest.mark.anyio @@ -148,7 +148,7 @@ async def test_task_execution_auto_fails_on_exception() -> None: failed_task = await store.get_task("exec-fail-1") assert failed_task is not None assert failed_task.status == "failed" - assert "Oops!" in (failed_task.statusMessage or "") + assert "Oops!" in (failed_task.status_message or "") store.cleanup() diff --git a/tests/experimental/tasks/server/test_integration.py b/tests/experimental/tasks/server/test_integration.py index ba61dfcead..3d7d89c344 100644 --- a/tests/experimental/tasks/server/test_integration.py +++ b/tests/experimental/tasks/server/test_integration.py @@ -81,11 +81,11 @@ async def list_tools(): Tool( name="process_data", description="Process data asynchronously", - inputSchema={ + input_schema={ "type": "object", "properties": {"input": {"type": "string"}}, }, - execution=ToolExecution(taskSupport=TASK_REQUIRED), + execution=ToolExecution(task_support=TASK_REQUIRED), ) ] @@ -101,11 +101,11 @@ async def handle_call_tool(name: str, arguments: dict[str, Any]) -> list[TextCon # 2. Create event to signal completion (for testing) done_event = Event() - app.task_done_events[task.taskId] = done_event + app.task_done_events[task.task_id] = done_event # 3. 
Define work function using task_execution for safety async def do_work(): - async with task_execution(task.taskId, app.store) as task_ctx: + async with task_execution(task.task_id, app.store) as task_ctx: await task_ctx.update_status("Processing input...") # Simulate work input_value = arguments.get("input", "") @@ -126,16 +126,16 @@ async def do_work(): @server.experimental.get_task() async def handle_get_task(request: GetTaskRequest) -> GetTaskResult: app = server.request_context.lifespan_context - task = await app.store.get_task(request.params.taskId) - assert task is not None, f"Test setup error: task {request.params.taskId} should exist" + task = await app.store.get_task(request.params.task_id) + assert task is not None, f"Test setup error: task {request.params.task_id} should exist" return GetTaskResult( - taskId=task.taskId, + task_id=task.task_id, status=task.status, - statusMessage=task.statusMessage, - createdAt=task.createdAt, - lastUpdatedAt=task.lastUpdatedAt, + status_message=task.status_message, + created_at=task.created_at, + last_updated_at=task.last_updated_at, ttl=task.ttl, - pollInterval=task.pollInterval, + poll_interval=task.poll_interval, ) @server.experimental.get_task_result() @@ -143,8 +143,8 @@ async def handle_get_task_result( request: GetTaskPayloadRequest, ) -> GetTaskPayloadResult: app = server.request_context.lifespan_context - result = await app.store.get_result(request.params.taskId) - assert result is not None, f"Test setup error: result for {request.params.taskId} should exist" + result = await app.store.get_result(request.params.task_id) + assert result is not None, f"Test setup error: result for {request.params.task_id} should exist" assert isinstance(result, CallToolResult) # Return as GetTaskPayloadResult (which accepts extra fields) return GetTaskPayloadResult(**result.model_dump()) @@ -205,22 +205,22 @@ async def run_server(app_context: AppContext): assert isinstance(create_result, CreateTaskResult) assert create_result.task.status == "working" - task_id = create_result.task.taskId + task_id = create_result.task.task_id # === Step 2: Wait for task to complete === await app_context.task_done_events[task_id].wait() task_status = await client_session.send_request( - ClientRequest(GetTaskRequest(params=GetTaskRequestParams(taskId=task_id))), + ClientRequest(GetTaskRequest(params=GetTaskRequestParams(task_id=task_id))), GetTaskResult, ) - assert task_status.taskId == task_id + assert task_status.task_id == task_id assert task_status.status == "completed" # === Step 3: Retrieve the actual result === task_result = await client_session.send_request( - ClientRequest(GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(taskId=task_id))), + ClientRequest(GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id=task_id))), CallToolResult, ) @@ -245,7 +245,7 @@ async def list_tools(): Tool( name="failing_task", description="A task that fails", - inputSchema={"type": "object", "properties": {}}, + input_schema={"type": "object", "properties": {}}, ) ] @@ -260,10 +260,10 @@ async def handle_call_tool(name: str, arguments: dict[str, Any]) -> list[TextCon # Create event to signal completion (for testing) done_event = Event() - app.task_done_events[task.taskId] = done_event + app.task_done_events[task.task_id] = done_event async def do_failing_work(): - async with task_execution(task.taskId, app.store) as task_ctx: + async with task_execution(task.task_id, app.store) as task_ctx: await task_ctx.update_status("About to fail...") raise 
RuntimeError("Something went wrong!") # Note: complete() is never called, but task_execution @@ -279,16 +279,16 @@ async def do_failing_work(): @server.experimental.get_task() async def handle_get_task(request: GetTaskRequest) -> GetTaskResult: app = server.request_context.lifespan_context - task = await app.store.get_task(request.params.taskId) - assert task is not None, f"Test setup error: task {request.params.taskId} should exist" + task = await app.store.get_task(request.params.task_id) + assert task is not None, f"Test setup error: task {request.params.task_id} should exist" return GetTaskResult( - taskId=task.taskId, + task_id=task.task_id, status=task.status, - statusMessage=task.statusMessage, - createdAt=task.createdAt, - lastUpdatedAt=task.lastUpdatedAt, + status_message=task.status_message, + created_at=task.created_at, + last_updated_at=task.last_updated_at, ttl=task.ttl, - pollInterval=task.pollInterval, + poll_interval=task.poll_interval, ) # Set up streams @@ -340,18 +340,18 @@ async def run_server(app_context: AppContext): CreateTaskResult, ) - task_id = create_result.task.taskId + task_id = create_result.task.task_id # Wait for task to complete (even though it fails) await app_context.task_done_events[task_id].wait() # Check that task was auto-failed task_status = await client_session.send_request( - ClientRequest(GetTaskRequest(params=GetTaskRequestParams(taskId=task_id))), + ClientRequest(GetTaskRequest(params=GetTaskRequestParams(task_id=task_id))), GetTaskResult, ) assert task_status.status == "failed" - assert task_status.statusMessage == "Something went wrong!" + assert task_status.status_message == "Something went wrong!" tg.cancel_scope.cancel() diff --git a/tests/experimental/tasks/server/test_run_task_flow.py b/tests/experimental/tasks/server/test_run_task_flow.py index 9e21746e28..ebfc427891 100644 --- a/tests/experimental/tasks/server/test_run_task_flow.py +++ b/tests/experimental/tasks/server/test_run_task_flow.py @@ -69,8 +69,8 @@ async def list_tools() -> list[Tool]: Tool( name="simple_task", description="A simple task", - inputSchema={"type": "object", "properties": {"input": {"type": "string"}}}, - execution=ToolExecution(taskSupport=TASK_REQUIRED), + input_schema={"type": "object", "properties": {"input": {"type": "string"}}}, + execution=ToolExecution(task_support=TASK_REQUIRED), ) ] @@ -119,7 +119,7 @@ async def run_client() -> None: ) # Should get CreateTaskResult - task_id = result.task.taskId + task_id = result.task.task_id assert result.task.status == "working" # Wait for work to complete @@ -157,8 +157,8 @@ async def list_tools() -> list[Tool]: Tool( name="failing_task", description="A task that fails", - inputSchema={"type": "object"}, - execution=ToolExecution(taskSupport=TASK_REQUIRED), + input_schema={"type": "object"}, + execution=ToolExecution(task_support=TASK_REQUIRED), ) ] @@ -188,7 +188,7 @@ async def run_client() -> None: await client_session.initialize() result = await client_session.experimental.call_tool_as_task("failing_task", {}) - task_id = result.task.taskId + task_id = result.task.task_id # Wait for work to fail with anyio.fail_after(5): @@ -201,7 +201,7 @@ async def run_client() -> None: if task_status.status == "failed": # pragma: no branch break - assert "Something went wrong" in (task_status.statusMessage or "") + assert "Something went wrong" in (task_status.status_message or "") async with anyio.create_task_group() as tg: tg.start_soon(run_server) @@ -363,8 +363,8 @@ async def list_tools() -> list[Tool]: Tool( 
name="task_with_immediate", description="A task with immediate response", - inputSchema={"type": "object"}, - execution=ToolExecution(taskSupport=TASK_REQUIRED), + input_schema={"type": "object"}, + execution=ToolExecution(task_support=TASK_REQUIRED), ) ] @@ -422,8 +422,8 @@ async def list_tools() -> list[Tool]: Tool( name="manual_complete_task", description="A task that manually completes", - inputSchema={"type": "object"}, - execution=ToolExecution(taskSupport=TASK_REQUIRED), + input_schema={"type": "object"}, + execution=ToolExecution(task_support=TASK_REQUIRED), ) ] @@ -457,7 +457,7 @@ async def run_client() -> None: await client_session.initialize() result = await client_session.experimental.call_tool_as_task("manual_complete_task", {}) - task_id = result.task.taskId + task_id = result.task.task_id with anyio.fail_after(5): await work_completed.wait() @@ -488,8 +488,8 @@ async def list_tools() -> list[Tool]: Tool( name="manual_cancel_task", description="A task that manually cancels then raises", - inputSchema={"type": "object"}, - execution=ToolExecution(taskSupport=TASK_REQUIRED), + input_schema={"type": "object"}, + execution=ToolExecution(task_support=TASK_REQUIRED), ) ] @@ -522,7 +522,7 @@ async def run_client() -> None: await client_session.initialize() result = await client_session.experimental.call_tool_as_task("manual_cancel_task", {}) - task_id = result.task.taskId + task_id = result.task.task_id with anyio.fail_after(5): await work_completed.wait() @@ -535,7 +535,7 @@ async def run_client() -> None: break # Task should still be failed (from manual fail, not auto-fail from exception) - assert status.statusMessage == "Manually failed" # Not "This error should not change status" + assert status.status_message == "Manually failed" # Not "This error should not change status" async with anyio.create_task_group() as tg: tg.start_soon(run_server) diff --git a/tests/experimental/tasks/server/test_server.py b/tests/experimental/tasks/server/test_server.py index 38ab7d7ce0..49810df12d 100644 --- a/tests/experimental/tasks/server/test_server.py +++ b/tests/experimental/tasks/server/test_server.py @@ -64,20 +64,20 @@ async def test_list_tasks_handler() -> None: now = datetime.now(timezone.utc) test_tasks = [ Task( - taskId="task-1", + task_id="task-1", status="working", - createdAt=now, - lastUpdatedAt=now, + created_at=now, + last_updated_at=now, ttl=60000, - pollInterval=1000, + poll_interval=1000, ), Task( - taskId="task-2", + task_id="task-2", status="completed", - createdAt=now, - lastUpdatedAt=now, + created_at=now, + last_updated_at=now, ttl=60000, - pollInterval=1000, + poll_interval=1000, ), ] @@ -92,8 +92,8 @@ async def handle_list_tasks(request: ListTasksRequest) -> ListTasksResult: assert isinstance(result, ServerResult) assert isinstance(result.root, ListTasksResult) assert len(result.root.tasks) == 2 - assert result.root.tasks[0].taskId == "task-1" - assert result.root.tasks[1].taskId == "task-2" + assert result.root.tasks[0].task_id == "task-1" + assert result.root.tasks[1].task_id == "task-2" @pytest.mark.anyio @@ -105,24 +105,24 @@ async def test_get_task_handler() -> None: async def handle_get_task(request: GetTaskRequest) -> GetTaskResult: now = datetime.now(timezone.utc) return GetTaskResult( - taskId=request.params.taskId, + task_id=request.params.task_id, status="working", - createdAt=now, - lastUpdatedAt=now, + created_at=now, + last_updated_at=now, ttl=60000, - pollInterval=1000, + poll_interval=1000, ) handler = server.request_handlers[GetTaskRequest] request = 
GetTaskRequest( method="tasks/get", - params=GetTaskRequestParams(taskId="test-task-123"), + params=GetTaskRequestParams(task_id="test-task-123"), ) result = await handler(request) assert isinstance(result, ServerResult) assert isinstance(result.root, GetTaskResult) - assert result.root.taskId == "test-task-123" + assert result.root.task_id == "test-task-123" assert result.root.status == "working" @@ -138,7 +138,7 @@ async def handle_get_task_result(request: GetTaskPayloadRequest) -> GetTaskPaylo handler = server.request_handlers[GetTaskPayloadRequest] request = GetTaskPayloadRequest( method="tasks/result", - params=GetTaskPayloadRequestParams(taskId="test-task-123"), + params=GetTaskPayloadRequestParams(task_id="test-task-123"), ) result = await handler(request) @@ -155,23 +155,23 @@ async def test_cancel_task_handler() -> None: async def handle_cancel_task(request: CancelTaskRequest) -> CancelTaskResult: now = datetime.now(timezone.utc) return CancelTaskResult( - taskId=request.params.taskId, + task_id=request.params.task_id, status="cancelled", - createdAt=now, - lastUpdatedAt=now, + created_at=now, + last_updated_at=now, ttl=60000, ) handler = server.request_handlers[CancelTaskRequest] request = CancelTaskRequest( method="tasks/cancel", - params=CancelTaskRequestParams(taskId="test-task-123"), + params=CancelTaskRequestParams(task_id="test-task-123"), ) result = await handler(request) assert isinstance(result, ServerResult) assert isinstance(result.root, CancelTaskResult) - assert result.root.taskId == "test-task-123" + assert result.root.task_id == "test-task-123" assert result.root.status == "cancelled" @@ -232,20 +232,20 @@ async def list_tools(): Tool( name="quick_tool", description="Fast tool", - inputSchema={"type": "object", "properties": {}}, - execution=ToolExecution(taskSupport=TASK_FORBIDDEN), + input_schema={"type": "object", "properties": {}}, + execution=ToolExecution(task_support=TASK_FORBIDDEN), ), Tool( name="long_tool", description="Long running tool", - inputSchema={"type": "object", "properties": {}}, - execution=ToolExecution(taskSupport=TASK_REQUIRED), + input_schema={"type": "object", "properties": {}}, + execution=ToolExecution(task_support=TASK_REQUIRED), ), Tool( name="flexible_tool", description="Can be either", - inputSchema={"type": "object", "properties": {}}, - execution=ToolExecution(taskSupport=TASK_OPTIONAL), + input_schema={"type": "object", "properties": {}}, + execution=ToolExecution(task_support=TASK_OPTIONAL), ), ] @@ -258,11 +258,11 @@ async def list_tools(): tools = result.root.tools assert tools[0].execution is not None - assert tools[0].execution.taskSupport == TASK_FORBIDDEN + assert tools[0].execution.task_support == TASK_FORBIDDEN assert tools[1].execution is not None - assert tools[1].execution.taskSupport == TASK_REQUIRED + assert tools[1].execution.task_support == TASK_REQUIRED assert tools[2].execution is not None - assert tools[2].execution.taskSupport == TASK_OPTIONAL + assert tools[2].execution.task_support == TASK_OPTIONAL @pytest.mark.anyio @@ -277,8 +277,8 @@ async def list_tools(): Tool( name="long_task", description="A long running task", - inputSchema={"type": "object", "properties": {}}, - execution=ToolExecution(taskSupport="optional"), + input_schema={"type": "object", "properties": {}}, + execution=ToolExecution(task_support="optional"), ) ] @@ -361,7 +361,7 @@ async def list_tools(): Tool( name="test_tool", description="Test tool", - inputSchema={"type": "object", "properties": {}}, + input_schema={"type": "object", 
"properties": {}}, ) ] @@ -513,33 +513,33 @@ async def run_server() -> None: ListTasksResult, ) assert len(list_result.tasks) == 1 - assert list_result.tasks[0].taskId == task.taskId + assert list_result.tasks[0].task_id == task.task_id # Test get_task (default handler - found) get_result = await client_session.send_request( - ClientRequest(GetTaskRequest(params=GetTaskRequestParams(taskId=task.taskId))), + ClientRequest(GetTaskRequest(params=GetTaskRequestParams(task_id=task.task_id))), GetTaskResult, ) - assert get_result.taskId == task.taskId + assert get_result.task_id == task.task_id assert get_result.status == "working" # Test get_task (default handler - not found path) with pytest.raises(McpError, match="not found"): await client_session.send_request( - ClientRequest(GetTaskRequest(params=GetTaskRequestParams(taskId="nonexistent-task"))), + ClientRequest(GetTaskRequest(params=GetTaskRequestParams(task_id="nonexistent-task"))), GetTaskResult, ) # Create a completed task to test get_task_result completed_task = await store.create_task(TaskMetadata(ttl=60000)) await store.store_result( - completed_task.taskId, CallToolResult(content=[TextContent(type="text", text="Test result")]) + completed_task.task_id, CallToolResult(content=[TextContent(type="text", text="Test result")]) ) - await store.update_task(completed_task.taskId, status="completed") + await store.update_task(completed_task.task_id, status="completed") # Test get_task_result (default handler) payload_result = await client_session.send_request( - ClientRequest(GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(taskId=completed_task.taskId))), + ClientRequest(GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id=completed_task.task_id))), GetTaskPayloadResult, ) # The result should have the related-task metadata @@ -548,10 +548,10 @@ async def run_server() -> None: # Test cancel_task (default handler) cancel_result = await client_session.send_request( - ClientRequest(CancelTaskRequest(params=CancelTaskRequestParams(taskId=task.taskId))), + ClientRequest(CancelTaskRequest(params=CancelTaskRequestParams(task_id=task.task_id))), CancelTaskResult, ) - assert cancel_result.taskId == task.taskId + assert cancel_result.task_id == task.task_id assert cancel_result.status == "cancelled" tg.cancel_scope.cancel() @@ -576,7 +576,7 @@ async def test_build_elicit_form_request() -> None: # Test without task_id request = server_session._build_elicit_form_request( message="Test message", - requestedSchema={"type": "object", "properties": {"answer": {"type": "string"}}}, + requested_schema={"type": "object", "properties": {"answer": {"type": "string"}}}, ) assert request.method == "elicitation/create" assert request.params is not None @@ -585,7 +585,7 @@ async def test_build_elicit_form_request() -> None: # Test with related_task_id (adds related-task metadata) request_with_task = server_session._build_elicit_form_request( message="Task message", - requestedSchema={"type": "object"}, + requested_schema={"type": "object"}, related_task_id="test-task-123", ) assert request_with_task.method == "elicitation/create" diff --git a/tests/experimental/tasks/server/test_server_task_context.py b/tests/experimental/tasks/server/test_server_task_context.py index 3d6b16f482..0fe563a75c 100644 --- a/tests/experimental/tasks/server/test_server_task_context.py +++ b/tests/experimental/tasks/server/test_server_task_context.py @@ -45,7 +45,7 @@ async def test_server_task_context_properties() -> None: ) assert ctx.task_id == "test-123" - assert 
ctx.task.taskId == "test-123" + assert ctx.task.task_id == "test-123" assert ctx.is_cancelled is False store.cleanup() @@ -181,7 +181,7 @@ async def test_elicit_raises_when_client_lacks_capability() -> None: ) with pytest.raises(McpError) as exc_info: - await ctx.elicit(message="Test?", requestedSchema={"type": "object"}) + await ctx.elicit(message="Test?", requested_schema={"type": "object"}) assert "elicitation capability" in exc_info.value.error.message mock_session.check_client_capability.assert_called_once() @@ -232,7 +232,7 @@ async def test_elicit_raises_without_handler() -> None: ) with pytest.raises(RuntimeError, match="handler is required"): - await ctx.elicit(message="Test?", requestedSchema={"type": "object"}) + await ctx.elicit(message="Test?", requested_schema={"type": "object"}) store.cleanup() @@ -320,22 +320,22 @@ async def run_elicit() -> None: nonlocal elicit_result elicit_result = await ctx.elicit( message="Test?", - requestedSchema={"type": "object"}, + requested_schema={"type": "object"}, ) async with anyio.create_task_group() as tg: tg.start_soon(run_elicit) # Wait for request to be queued - await queue.wait_for_message(task.taskId) + await queue.wait_for_message(task.task_id) # Verify task is in input_required status - updated_task = await store.get_task(task.taskId) + updated_task = await store.get_task(task.task_id) assert updated_task is not None assert updated_task.status == "input_required" # Dequeue and simulate response - msg = await queue.dequeue(task.taskId) + msg = await queue.dequeue(task.task_id) assert msg is not None assert msg.resolver is not None @@ -348,7 +348,7 @@ async def run_elicit() -> None: assert elicit_result.content == {"name": "Alice"} # Verify task is back to working - final_task = await store.get_task(task.taskId) + final_task = await store.get_task(task.task_id) assert final_task is not None assert final_task.status == "working" @@ -396,15 +396,15 @@ async def run_elicit_url() -> None: tg.start_soon(run_elicit_url) # Wait for request to be queued - await queue.wait_for_message(task.taskId) + await queue.wait_for_message(task.task_id) # Verify task is in input_required status - updated_task = await store.get_task(task.taskId) + updated_task = await store.get_task(task.task_id) assert updated_task is not None assert updated_task.status == "input_required" # Dequeue and simulate response - msg = await queue.dequeue(task.taskId) + msg = await queue.dequeue(task.task_id) assert msg is not None assert msg.resolver is not None @@ -416,7 +416,7 @@ async def run_elicit_url() -> None: assert elicit_result.action == "accept" # Verify task is back to working - final_task = await store.get_task(task.taskId) + final_task = await store.get_task(task.task_id) assert final_task is not None assert final_task.status == "working" @@ -463,15 +463,15 @@ async def run_sampling() -> None: tg.start_soon(run_sampling) # Wait for request to be queued - await queue.wait_for_message(task.taskId) + await queue.wait_for_message(task.task_id) # Verify task is in input_required status - updated_task = await store.get_task(task.taskId) + updated_task = await store.get_task(task.task_id) assert updated_task is not None assert updated_task.status == "input_required" # Dequeue and simulate response - msg = await queue.dequeue(task.taskId) + msg = await queue.dequeue(task.task_id) assert msg is not None assert msg.resolver is not None @@ -491,7 +491,7 @@ async def run_sampling() -> None: assert sampling_result.model == "test-model" # Verify task is back to working - 
final_task = await store.get_task(task.taskId) + final_task = await store.get_task(task.task_id) assert final_task is not None assert final_task.status == "working" @@ -534,7 +534,7 @@ async def do_elicit() -> None: try: await ctx.elicit( message="Test?", - requestedSchema={"type": "object"}, + requested_schema={"type": "object"}, ) except anyio.get_cancelled_exc_class(): cancelled_error_raised = True @@ -543,15 +543,15 @@ async def do_elicit() -> None: tg.start_soon(do_elicit) # Wait for request to be queued - await queue.wait_for_message(task.taskId) + await queue.wait_for_message(task.task_id) # Verify task is in input_required status - updated_task = await store.get_task(task.taskId) + updated_task = await store.get_task(task.task_id) assert updated_task is not None assert updated_task.status == "input_required" # Get the queued message and set cancellation exception on its resolver - msg = await queue.dequeue(task.taskId) + msg = await queue.dequeue(task.task_id) assert msg is not None assert msg.resolver is not None @@ -559,7 +559,7 @@ async def do_elicit() -> None: msg.resolver.set_exception(asyncio.CancelledError()) # Verify task is back to working after cancellation - final_task = await store.get_task(task.taskId) + final_task = await store.get_task(task.task_id) assert final_task is not None assert final_task.status == "working" assert cancelled_error_raised @@ -612,15 +612,15 @@ async def do_sampling() -> None: tg.start_soon(do_sampling) # Wait for request to be queued - await queue.wait_for_message(task.taskId) + await queue.wait_for_message(task.task_id) # Verify task is in input_required status - updated_task = await store.get_task(task.taskId) + updated_task = await store.get_task(task.task_id) assert updated_task is not None assert updated_task.status == "input_required" # Get the queued message and set cancellation exception on its resolver - msg = await queue.dequeue(task.taskId) + msg = await queue.dequeue(task.task_id) assert msg is not None assert msg.resolver is not None @@ -628,7 +628,7 @@ async def do_sampling() -> None: msg.resolver.set_exception(asyncio.CancelledError()) # Verify task is back to working after cancellation - final_task = await store.get_task(task.taskId) + final_task = await store.get_task(task.task_id) assert final_task is not None assert final_task.status == "working" assert cancelled_error_raised @@ -646,7 +646,7 @@ async def test_elicit_as_task_raises_without_handler() -> None: # Create mock session with proper client capabilities mock_session = Mock() mock_session.client_params = InitializeRequestParams( - protocolVersion="2025-01-01", + protocol_version="2025-01-01", capabilities=ClientCapabilities( tasks=ClientTasksCapability( requests=ClientTasksRequestsCapability( @@ -654,7 +654,7 @@ async def test_elicit_as_task_raises_without_handler() -> None: ) ) ), - clientInfo=Implementation(name="test", version="1.0"), + client_info=Implementation(name="test", version="1.0"), ) ctx = ServerTaskContext( @@ -666,7 +666,7 @@ async def test_elicit_as_task_raises_without_handler() -> None: ) with pytest.raises(RuntimeError, match="handler is required for elicit_as_task"): - await ctx.elicit_as_task(message="Test?", requestedSchema={"type": "object"}) + await ctx.elicit_as_task(message="Test?", requested_schema={"type": "object"}) store.cleanup() @@ -681,15 +681,15 @@ async def test_create_message_as_task_raises_without_handler() -> None: # Create mock session with proper client capabilities mock_session = Mock() mock_session.client_params = 
InitializeRequestParams( - protocolVersion="2025-01-01", + protocol_version="2025-01-01", capabilities=ClientCapabilities( tasks=ClientTasksCapability( requests=ClientTasksRequestsCapability( - sampling=TasksSamplingCapability(createMessage=TasksCreateMessageCapability()) + sampling=TasksSamplingCapability(create_message=TasksCreateMessageCapability()) ) ) ), - clientInfo=Implementation(name="test", version="1.0"), + client_info=Implementation(name="test", version="1.0"), ) ctx = ServerTaskContext( diff --git a/tests/experimental/tasks/server/test_store.py b/tests/experimental/tasks/server/test_store.py index 2eac31dfe6..d6f297e6c1 100644 --- a/tests/experimental/tasks/server/test_store.py +++ b/tests/experimental/tasks/server/test_store.py @@ -24,13 +24,13 @@ async def test_create_and_get(store: InMemoryTaskStore) -> None: """Test InMemoryTaskStore create and get operations.""" task = await store.create_task(metadata=TaskMetadata(ttl=60000)) - assert task.taskId is not None + assert task.task_id is not None assert task.status == "working" assert task.ttl == 60000 - retrieved = await store.get_task(task.taskId) + retrieved = await store.get_task(task.task_id) assert retrieved is not None - assert retrieved.taskId == task.taskId + assert retrieved.task_id == task.task_id assert retrieved.status == "working" @@ -42,12 +42,12 @@ async def test_create_with_custom_id(store: InMemoryTaskStore) -> None: task_id="my-custom-id", ) - assert task.taskId == "my-custom-id" + assert task.task_id == "my-custom-id" assert task.status == "working" retrieved = await store.get_task("my-custom-id") assert retrieved is not None - assert retrieved.taskId == "my-custom-id" + assert retrieved.task_id == "my-custom-id" @pytest.mark.anyio @@ -71,15 +71,15 @@ async def test_update_status(store: InMemoryTaskStore) -> None: """Test InMemoryTaskStore status updates.""" task = await store.create_task(metadata=TaskMetadata(ttl=60000)) - updated = await store.update_task(task.taskId, status="completed", status_message="All done!") + updated = await store.update_task(task.task_id, status="completed", status_message="All done!") assert updated.status == "completed" - assert updated.statusMessage == "All done!" + assert updated.status_message == "All done!" - retrieved = await store.get_task(task.taskId) + retrieved = await store.get_task(task.task_id) assert retrieved is not None assert retrieved.status == "completed" - assert retrieved.statusMessage == "All done!" + assert retrieved.status_message == "All done!" 
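Aside: this rename only touches the Python attribute names; the wire format keeps the spec's camelCase keys, which is what the `model_dump(by_alias=True)` calls elsewhere in this patch rely on. Below is a minimal sketch of how such a model is typically configured in pydantic v2; the `ConfigDict` settings here are an assumption for illustration, not code taken from `src/mcp/types.py`.

```python
from pydantic import BaseModel, ConfigDict
from pydantic.alias_generators import to_camel


class Task(BaseModel):
    # Assumed configuration: snake_case attributes, camelCase wire aliases.
    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)

    task_id: str
    status_message: str | None = None


task = Task(task_id="task-1", status_message="All done!")
# Attribute access is snake_case; serialization restores the spec's camelCase.
assert task.model_dump(by_alias=True) == {"taskId": "task-1", "statusMessage": "All done!"}
```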
@pytest.mark.anyio @@ -96,10 +96,10 @@ async def test_store_and_get_result(store: InMemoryTaskStore) -> None: # Store result result = CallToolResult(content=[TextContent(type="text", text="Result data")]) - await store.store_result(task.taskId, result) + await store.store_result(task.task_id, result) # Retrieve result - retrieved_result = await store.get_result(task.taskId) + retrieved_result = await store.get_result(task.task_id) assert retrieved_result == result @@ -114,7 +114,7 @@ async def test_get_result_nonexistent_returns_none(store: InMemoryTaskStore) -> async def test_get_result_no_result_returns_none(store: InMemoryTaskStore) -> None: """Test that getting result when none stored returns None.""" task = await store.create_task(metadata=TaskMetadata(ttl=60000)) - result = await store.get_result(task.taskId) + result = await store.get_result(task.task_id) assert result is None @@ -172,14 +172,14 @@ async def test_delete_task(store: InMemoryTaskStore) -> None: """Test InMemoryTaskStore delete operation.""" task = await store.create_task(metadata=TaskMetadata(ttl=60000)) - deleted = await store.delete_task(task.taskId) + deleted = await store.delete_task(task.task_id) assert deleted is True - retrieved = await store.get_task(task.taskId) + retrieved = await store.get_task(task.task_id) assert retrieved is None # Delete non-existent - deleted = await store.delete_task(task.taskId) + deleted = await store.delete_task(task.task_id) assert deleted is False @@ -210,7 +210,7 @@ async def test_create_task_with_null_ttl(store: InMemoryTaskStore) -> None: assert task.ttl is None # Task should persist (not expire) - retrieved = await store.get_task(task.taskId) + retrieved = await store.get_task(task.task_id) assert retrieved is not None @@ -221,19 +221,19 @@ async def test_task_expiration_cleanup(store: InMemoryTaskStore) -> None: task = await store.create_task(metadata=TaskMetadata(ttl=1)) # 1ms TTL # Manually force the expiry to be in the past - stored = store._tasks.get(task.taskId) + stored = store._tasks.get(task.task_id) assert stored is not None stored.expires_at = datetime.now(timezone.utc) - timedelta(seconds=10) # Task should still exist in internal dict but be expired - assert task.taskId in store._tasks + assert task.task_id in store._tasks # Any access operation should clean up expired tasks # list_tasks triggers cleanup tasks, _ = await store.list_tasks() # Expired task should be cleaned up - assert task.taskId not in store._tasks + assert task.task_id not in store._tasks assert len(tasks) == 0 @@ -244,17 +244,17 @@ async def test_task_with_null_ttl_never_expires(store: InMemoryTaskStore) -> Non task = await store.create_task(metadata=TaskMetadata(ttl=None)) # Verify internal storage has no expiry - stored = store._tasks.get(task.taskId) + stored = store._tasks.get(task.task_id) assert stored is not None assert stored.expires_at is None # Access operations should NOT remove this task await store.list_tasks() - await store.get_task(task.taskId) + await store.get_task(task.task_id) # Task should still exist - assert task.taskId in store._tasks - retrieved = await store.get_task(task.taskId) + assert task.task_id in store._tasks + retrieved = await store.get_task(task.task_id) assert retrieved is not None @@ -265,13 +265,13 @@ async def test_terminal_task_ttl_reset(store: InMemoryTaskStore) -> None: task = await store.create_task(metadata=TaskMetadata(ttl=60000)) # 60s # Get the initial expiry - stored = store._tasks.get(task.taskId) + stored = store._tasks.get(task.task_id) assert 
stored is not None initial_expiry = stored.expires_at assert initial_expiry is not None # Update to terminal state (completed) - await store.update_task(task.taskId, status="completed") + await store.update_task(task.task_id, status="completed") # Expiry should be reset to a new time (from now + TTL) new_expiry = stored.expires_at @@ -291,16 +291,16 @@ async def test_terminal_status_transition_rejected(store: InMemoryTaskStore) -> task = await store.create_task(metadata=TaskMetadata(ttl=60000)) # Move to terminal state - await store.update_task(task.taskId, status=terminal_status) + await store.update_task(task.task_id, status=terminal_status) # Attempting to transition to any other status should raise with pytest.raises(ValueError, match="Cannot transition from terminal status"): - await store.update_task(task.taskId, status="working") + await store.update_task(task.task_id, status="working") # Also test transitioning to another terminal state other_terminal = "failed" if terminal_status != "failed" else "completed" with pytest.raises(ValueError, match="Cannot transition from terminal status"): - await store.update_task(task.taskId, status=other_terminal) + await store.update_task(task.task_id, status=other_terminal) @pytest.mark.anyio @@ -310,15 +310,15 @@ async def test_terminal_status_allows_same_status(store: InMemoryTaskStore) -> N This is not a transition, so it should be allowed (no-op). """ task = await store.create_task(metadata=TaskMetadata(ttl=60000)) - await store.update_task(task.taskId, status="completed") + await store.update_task(task.task_id, status="completed") # Setting the same status should not raise - updated = await store.update_task(task.taskId, status="completed") + updated = await store.update_task(task.task_id, status="completed") assert updated.status == "completed" # Updating just the message should also work - updated = await store.update_task(task.taskId, status_message="Updated message") - assert updated.statusMessage == "Updated message" + updated = await store.update_task(task.task_id, status_message="Updated message") + assert updated.status_message == "Updated message" @pytest.mark.anyio @@ -334,13 +334,13 @@ async def test_cancel_task_succeeds_for_working_task(store: InMemoryTaskStore) - task = await store.create_task(metadata=TaskMetadata(ttl=60000)) assert task.status == "working" - result = await cancel_task(store, task.taskId) + result = await cancel_task(store, task.task_id) - assert result.taskId == task.taskId + assert result.task_id == task.task_id assert result.status == "cancelled" # Verify store is updated - retrieved = await store.get_task(task.taskId) + retrieved = await store.get_task(task.task_id) assert retrieved is not None assert retrieved.status == "cancelled" @@ -359,10 +359,10 @@ async def test_cancel_task_rejects_nonexistent_task(store: InMemoryTaskStore) -> async def test_cancel_task_rejects_completed_task(store: InMemoryTaskStore) -> None: """Test cancel_task raises McpError with INVALID_PARAMS for completed task.""" task = await store.create_task(metadata=TaskMetadata(ttl=60000)) - await store.update_task(task.taskId, status="completed") + await store.update_task(task.task_id, status="completed") with pytest.raises(McpError) as exc_info: - await cancel_task(store, task.taskId) + await cancel_task(store, task.task_id) assert exc_info.value.error.code == INVALID_PARAMS assert "terminal state 'completed'" in exc_info.value.error.message @@ -372,10 +372,10 @@ async def test_cancel_task_rejects_completed_task(store: 
InMemoryTaskStore) -> N async def test_cancel_task_rejects_failed_task(store: InMemoryTaskStore) -> None: """Test cancel_task raises McpError with INVALID_PARAMS for failed task.""" task = await store.create_task(metadata=TaskMetadata(ttl=60000)) - await store.update_task(task.taskId, status="failed") + await store.update_task(task.task_id, status="failed") with pytest.raises(McpError) as exc_info: - await cancel_task(store, task.taskId) + await cancel_task(store, task.task_id) assert exc_info.value.error.code == INVALID_PARAMS assert "terminal state 'failed'" in exc_info.value.error.message @@ -385,10 +385,10 @@ async def test_cancel_task_rejects_failed_task(store: InMemoryTaskStore) -> None async def test_cancel_task_rejects_already_cancelled_task(store: InMemoryTaskStore) -> None: """Test cancel_task raises McpError with INVALID_PARAMS for already cancelled task.""" task = await store.create_task(metadata=TaskMetadata(ttl=60000)) - await store.update_task(task.taskId, status="cancelled") + await store.update_task(task.task_id, status="cancelled") with pytest.raises(McpError) as exc_info: - await cancel_task(store, task.taskId) + await cancel_task(store, task.task_id) assert exc_info.value.error.code == INVALID_PARAMS assert "terminal state 'cancelled'" in exc_info.value.error.message @@ -398,9 +398,9 @@ async def test_cancel_task_rejects_already_cancelled_task(store: InMemoryTaskSto async def test_cancel_task_succeeds_for_input_required_task(store: InMemoryTaskStore) -> None: """Test cancel_task helper succeeds for a task in input_required status.""" task = await store.create_task(metadata=TaskMetadata(ttl=60000)) - await store.update_task(task.taskId, status="input_required") + await store.update_task(task.task_id, status="input_required") - result = await cancel_task(store, task.taskId) + result = await cancel_task(store, task.task_id) - assert result.taskId == task.taskId + assert result.task_id == task.task_id assert result.status == "cancelled" diff --git a/tests/experimental/tasks/server/test_task_result_handler.py b/tests/experimental/tasks/server/test_task_result_handler.py index db5b9edc70..ed6c296b73 100644 --- a/tests/experimental/tasks/server/test_task_result_handler.py +++ b/tests/experimental/tasks/server/test_task_result_handler.py @@ -53,13 +53,13 @@ async def test_handle_returns_result_for_completed_task( """Test that handle() returns the stored result for a completed task.""" task = await store.create_task(TaskMetadata(ttl=60000), task_id="test-task") result = CallToolResult(content=[TextContent(type="text", text="Done!")]) - await store.store_result(task.taskId, result) - await store.update_task(task.taskId, status="completed") + await store.store_result(task.task_id, result) + await store.update_task(task.task_id, status="completed") mock_session = Mock() mock_session.send_message = AsyncMock() - request = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(taskId=task.taskId)) + request = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id=task.task_id)) response = await handler.handle(request, mock_session, "req-1") assert response is not None @@ -73,7 +73,7 @@ async def test_handle_raises_for_nonexistent_task( ) -> None: """Test that handle() raises McpError for nonexistent task.""" mock_session = Mock() - request = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(taskId="nonexistent")) + request = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id="nonexistent")) with pytest.raises(McpError) as exc_info: await 
handler.handle(request, mock_session, "req-1") @@ -87,12 +87,12 @@ async def test_handle_returns_empty_result_when_no_result_stored( ) -> None: """Test that handle() returns minimal result when task completed without stored result.""" task = await store.create_task(TaskMetadata(ttl=60000), task_id="test-task") - await store.update_task(task.taskId, status="completed") + await store.update_task(task.task_id, status="completed") mock_session = Mock() mock_session.send_message = AsyncMock() - request = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(taskId=task.taskId)) + request = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id=task.task_id)) response = await handler.handle(request, mock_session, "req-1") assert response is not None @@ -116,8 +116,8 @@ async def test_handle_delivers_queued_messages( params={}, ), ) - await queue.enqueue(task.taskId, queued_msg) - await store.update_task(task.taskId, status="completed") + await queue.enqueue(task.task_id, queued_msg) + await store.update_task(task.task_id, status="completed") sent_messages: list[SessionMessage] = [] @@ -127,7 +127,7 @@ async def track_send(msg: SessionMessage) -> None: mock_session = Mock() mock_session.send_message = track_send - request = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(taskId=task.taskId)) + request = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id=task.task_id)) await handler.handle(request, mock_session, "req-1") assert len(sent_messages) == 1 @@ -143,7 +143,7 @@ async def test_handle_waits_for_task_completion( mock_session = Mock() mock_session.send_message = AsyncMock() - request = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(taskId=task.taskId)) + request = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id=task.task_id)) result_holder: list[GetTaskPayloadResult | None] = [None] async def run_handle() -> None: @@ -153,11 +153,11 @@ async def run_handle() -> None: tg.start_soon(run_handle) # Wait for handler to start waiting (event gets created when wait starts) - while task.taskId not in store._update_events: + while task.task_id not in store._update_events: await anyio.sleep(0) - await store.store_result(task.taskId, CallToolResult(content=[TextContent(type="text", text="Done")])) - await store.update_task(task.taskId, status="completed") + await store.store_result(task.task_id, CallToolResult(content=[TextContent(type="text", text="Done")])) + await store.update_task(task.task_id, status="completed") assert result_holder[0] is not None @@ -248,12 +248,12 @@ async def test_deliver_registers_resolver_for_request_messages( resolver=resolver, original_request_id="inner-req-1", ) - await queue.enqueue(task.taskId, queued_msg) + await queue.enqueue(task.task_id, queued_msg) mock_session = Mock() mock_session.send_message = AsyncMock() - await handler._deliver_queued_messages(task.taskId, mock_session, "outer-req-1") + await handler._deliver_queued_messages(task.task_id, mock_session, "outer-req-1") assert "inner-req-1" in handler._pending_requests assert handler._pending_requests["inner-req-1"] is resolver @@ -278,12 +278,12 @@ async def test_deliver_skips_resolver_registration_when_no_original_id( resolver=resolver, original_request_id=None, # No original request ID ) - await queue.enqueue(task.taskId, queued_msg) + await queue.enqueue(task.task_id, queued_msg) mock_session = Mock() mock_session.send_message = AsyncMock() - await handler._deliver_queued_messages(task.taskId, mock_session, "outer-req-1") + await 
handler._deliver_queued_messages(task.task_id, mock_session, "outer-req-1") # Resolver should NOT be registered since original_request_id is None assert len(handler._pending_requests) == 0 @@ -307,10 +307,10 @@ async def failing_wait(task_id: str) -> None: # Queue a message to unblock the race via the queue path async def enqueue_later() -> None: # Wait for queue to start waiting (event gets created when wait starts) - while task.taskId not in queue._events: + while task.task_id not in queue._events: await anyio.sleep(0) await queue.enqueue( - task.taskId, + task.task_id, QueuedMessage( type="notification", message=JSONRPCRequest( @@ -325,7 +325,7 @@ async def enqueue_later() -> None: async with anyio.create_task_group() as tg: tg.start_soon(enqueue_later) # This should complete via the queue path even though store raises - await handler._wait_for_task_update(task.taskId) + await handler._wait_for_task_update(task.task_id) @pytest.mark.anyio @@ -344,11 +344,11 @@ async def failing_wait(task_id: str) -> None: # Update the store to unblock the race via the store path async def update_later() -> None: # Wait for store to start waiting (event gets created when wait starts) - while task.taskId not in store._update_events: + while task.task_id not in store._update_events: await anyio.sleep(0) - await store.update_task(task.taskId, status="completed") + await store.update_task(task.task_id, status="completed") async with anyio.create_task_group() as tg: tg.start_soon(update_later) # This should complete via the store path even though queue raises - await handler._wait_for_task_update(task.taskId) + await handler._wait_for_task_update(task.task_id) diff --git a/tests/experimental/tasks/test_capabilities.py b/tests/experimental/tasks/test_capabilities.py index e78f16fe3f..4298ebdebb 100644 --- a/tests/experimental/tasks/test_capabilities.py +++ b/tests/experimental/tasks/test_capabilities.py @@ -82,7 +82,7 @@ def test_sampling_create_message_required_but_client_missing(self) -> None: """When sampling.createMessage is required but client doesn't have it.""" required = ClientTasksCapability( requests=ClientTasksRequestsCapability( - sampling=TasksSamplingCapability(createMessage=TasksCreateMessageCapability()) + sampling=TasksSamplingCapability(create_message=TasksCreateMessageCapability()) ) ) client = ClientTasksCapability( @@ -96,12 +96,12 @@ def test_sampling_create_message_present(self) -> None: """When sampling.createMessage is required and client has it.""" required = ClientTasksCapability( requests=ClientTasksRequestsCapability( - sampling=TasksSamplingCapability(createMessage=TasksCreateMessageCapability()) + sampling=TasksSamplingCapability(create_message=TasksCreateMessageCapability()) ) ) client = ClientTasksCapability( requests=ClientTasksRequestsCapability( - sampling=TasksSamplingCapability(createMessage=TasksCreateMessageCapability()) + sampling=TasksSamplingCapability(create_message=TasksCreateMessageCapability()) ) ) assert check_tasks_capability(required, client) is True @@ -111,13 +111,13 @@ def test_both_elicitation_and_sampling_present(self) -> None: required = ClientTasksCapability( requests=ClientTasksRequestsCapability( elicitation=TasksElicitationCapability(create=TasksCreateElicitationCapability()), - sampling=TasksSamplingCapability(createMessage=TasksCreateMessageCapability()), + sampling=TasksSamplingCapability(create_message=TasksCreateMessageCapability()), ) ) client = ClientTasksCapability( requests=ClientTasksRequestsCapability( 
elicitation=TasksElicitationCapability(create=TasksCreateElicitationCapability()), - sampling=TasksSamplingCapability(createMessage=TasksCreateMessageCapability()), + sampling=TasksSamplingCapability(create_message=TasksCreateMessageCapability()), ) ) assert check_tasks_capability(required, client) is True @@ -145,7 +145,7 @@ def test_sampling_without_create_message_required(self) -> None: ) client = ClientTasksCapability( requests=ClientTasksRequestsCapability( - sampling=TasksSamplingCapability(createMessage=TasksCreateMessageCapability()) + sampling=TasksSamplingCapability(create_message=TasksCreateMessageCapability()) ) ) assert check_tasks_capability(required, client) is True @@ -220,7 +220,7 @@ def test_create_message_present(self) -> None: caps = ClientCapabilities( tasks=ClientTasksCapability( requests=ClientTasksRequestsCapability( - sampling=TasksSamplingCapability(createMessage=TasksCreateMessageCapability()) + sampling=TasksSamplingCapability(create_message=TasksCreateMessageCapability()) ) ) ) @@ -276,7 +276,7 @@ def test_passes_when_present(self) -> None: caps = ClientCapabilities( tasks=ClientTasksCapability( requests=ClientTasksRequestsCapability( - sampling=TasksSamplingCapability(createMessage=TasksCreateMessageCapability()) + sampling=TasksSamplingCapability(create_message=TasksCreateMessageCapability()) ) ) ) diff --git a/tests/experimental/tasks/test_elicitation_scenarios.py b/tests/experimental/tasks/test_elicitation_scenarios.py index be2b616018..9044156047 100644 --- a/tests/experimental/tasks/test_elicitation_scenarios.py +++ b/tests/experimental/tasks/test_elicitation_scenarios.py @@ -61,13 +61,13 @@ async def handle_augmented_elicitation( """Handle task-augmented elicitation by creating a client-side task.""" elicit_received.set() task = await client_task_store.create_task(task_metadata) - task_complete_events[task.taskId] = Event() + task_complete_events[task.task_id] = Event() async def complete_task() -> None: # Store result before updating status to avoid race condition - await client_task_store.store_result(task.taskId, elicit_response) - await client_task_store.update_task(task.taskId, status="completed") - task_complete_events[task.taskId].set() + await client_task_store.store_result(task.task_id, elicit_response) + await client_task_store.update_task(task.task_id, status="completed") + task_complete_events[task.task_id].set() context.session._task_group.start_soon(complete_task) # pyright: ignore[reportPrivateUsage] return CreateTaskResult(task=task) @@ -77,16 +77,16 @@ async def handle_get_task( params: Any, ) -> GetTaskResult: """Handle tasks/get from server.""" - task = await client_task_store.get_task(params.taskId) - assert task is not None, f"Task not found: {params.taskId}" + task = await client_task_store.get_task(params.task_id) + assert task is not None, f"Task not found: {params.task_id}" return GetTaskResult( - taskId=task.taskId, + task_id=task.task_id, status=task.status, - statusMessage=task.statusMessage, - createdAt=task.createdAt, - lastUpdatedAt=task.lastUpdatedAt, + status_message=task.status_message, + created_at=task.created_at, + last_updated_at=task.last_updated_at, ttl=task.ttl, - pollInterval=100, + poll_interval=100, ) async def handle_get_task_result( @@ -94,11 +94,11 @@ async def handle_get_task_result( params: Any, ) -> GetTaskPayloadResult | ErrorData: """Handle tasks/result from server.""" - event = task_complete_events.get(params.taskId) - assert event is not None, f"No completion event for task: {params.taskId}" + event = 
task_complete_events.get(params.task_id) + assert event is not None, f"No completion event for task: {params.task_id}" await event.wait() - result = await client_task_store.get_result(params.taskId) - assert result is not None, f"Result not found for task: {params.taskId}" + result = await client_task_store.get_result(params.task_id) + assert result is not None, f"Result not found for task: {params.task_id}" return GetTaskPayloadResult.model_validate(result.model_dump(by_alias=True)) return ExperimentalTaskHandlers( @@ -129,13 +129,13 @@ async def handle_augmented_sampling( """Handle task-augmented sampling by creating a client-side task.""" sampling_received.set() task = await client_task_store.create_task(task_metadata) - task_complete_events[task.taskId] = Event() + task_complete_events[task.task_id] = Event() async def complete_task() -> None: # Store result before updating status to avoid race condition - await client_task_store.store_result(task.taskId, sampling_response) - await client_task_store.update_task(task.taskId, status="completed") - task_complete_events[task.taskId].set() + await client_task_store.store_result(task.task_id, sampling_response) + await client_task_store.update_task(task.task_id, status="completed") + task_complete_events[task.task_id].set() context.session._task_group.start_soon(complete_task) # pyright: ignore[reportPrivateUsage] return CreateTaskResult(task=task) @@ -145,16 +145,16 @@ async def handle_get_task( params: Any, ) -> GetTaskResult: """Handle tasks/get from server.""" - task = await client_task_store.get_task(params.taskId) - assert task is not None, f"Task not found: {params.taskId}" + task = await client_task_store.get_task(params.task_id) + assert task is not None, f"Task not found: {params.task_id}" return GetTaskResult( - taskId=task.taskId, + task_id=task.task_id, status=task.status, - statusMessage=task.statusMessage, - createdAt=task.createdAt, - lastUpdatedAt=task.lastUpdatedAt, + status_message=task.status_message, + created_at=task.created_at, + last_updated_at=task.last_updated_at, ttl=task.ttl, - pollInterval=100, + poll_interval=100, ) async def handle_get_task_result( @@ -162,11 +162,11 @@ async def handle_get_task_result( params: Any, ) -> GetTaskPayloadResult | ErrorData: """Handle tasks/result from server.""" - event = task_complete_events.get(params.taskId) - assert event is not None, f"No completion event for task: {params.taskId}" + event = task_complete_events.get(params.task_id) + assert event is not None, f"No completion event for task: {params.task_id}" await event.wait() - result = await client_task_store.get_result(params.taskId) - assert result is not None, f"Result not found for task: {params.taskId}" + result = await client_task_store.get_result(params.task_id) + assert result is not None, f"Result not found for task: {params.task_id}" return GetTaskPayloadResult.model_validate(result.model_dump(by_alias=True)) return ExperimentalTaskHandlers( @@ -193,7 +193,7 @@ async def list_tools() -> list[Tool]: Tool( name="confirm_action", description="Confirm an action", - inputSchema={"type": "object"}, + input_schema={"type": "object"}, ) ] @@ -204,7 +204,7 @@ async def handle_call_tool(name: str, arguments: dict[str, Any]) -> CallToolResu # Normal elicitation - expects immediate response result = await ctx.session.elicit( message="Please confirm the action", - requestedSchema={"type": "object", "properties": {"confirm": {"type": "boolean"}}}, + requested_schema={"type": "object", "properties": {"confirm": {"type": 
"boolean"}}}, ) confirmed = result.content.get("confirm", False) if result.content else False @@ -278,7 +278,7 @@ async def list_tools() -> list[Tool]: Tool( name="confirm_action", description="Confirm an action", - inputSchema={"type": "object"}, + input_schema={"type": "object"}, ) ] @@ -289,7 +289,7 @@ async def handle_call_tool(name: str, arguments: dict[str, Any]) -> CallToolResu # Task-augmented elicitation - server polls client result = await ctx.session.experimental.elicit_as_task( message="Please confirm the action", - requestedSchema={"type": "object", "properties": {"confirm": {"type": "boolean"}}}, + requested_schema={"type": "object", "properties": {"confirm": {"type": "boolean"}}}, ttl=60000, ) @@ -358,8 +358,8 @@ async def list_tools() -> list[Tool]: Tool( name="confirm_action", description="Confirm an action", - inputSchema={"type": "object"}, - execution=ToolExecution(taskSupport=TASK_REQUIRED), + input_schema={"type": "object"}, + execution=ToolExecution(task_support=TASK_REQUIRED), ) ] @@ -372,7 +372,7 @@ async def work(task: ServerTaskContext) -> CallToolResult: # Normal elicitation within task - queued and delivered via tasks/result result = await task.elicit( message="Please confirm the action", - requestedSchema={"type": "object", "properties": {"confirm": {"type": "boolean"}}}, + requested_schema={"type": "object", "properties": {"confirm": {"type": "boolean"}}}, ) confirmed = result.content.get("confirm", False) if result.content else False @@ -413,7 +413,7 @@ async def run_client() -> None: # Call tool as task create_result = await client_session.experimental.call_tool_as_task("confirm_action", {}) - task_id = create_result.task.taskId + task_id = create_result.task.task_id assert create_result.task.status == "working" # Poll until input_required, then call tasks/result @@ -472,8 +472,8 @@ async def list_tools() -> list[Tool]: Tool( name="confirm_action", description="Confirm an action", - inputSchema={"type": "object"}, - execution=ToolExecution(taskSupport=TASK_REQUIRED), + input_schema={"type": "object"}, + execution=ToolExecution(task_support=TASK_REQUIRED), ) ] @@ -486,7 +486,7 @@ async def work(task: ServerTaskContext) -> CallToolResult: # Task-augmented elicitation within task - server polls client result = await task.elicit_as_task( message="Please confirm the action", - requestedSchema={"type": "object", "properties": {"confirm": {"type": "boolean"}}}, + requested_schema={"type": "object", "properties": {"confirm": {"type": "boolean"}}}, ttl=60000, ) @@ -522,7 +522,7 @@ async def run_client() -> None: # Call tool as task create_result = await client_session.experimental.call_tool_as_task("confirm_action", {}) - task_id = create_result.task.taskId + task_id = create_result.task.task_id assert create_result.task.status == "working" # Poll until input_required or terminal, then call tasks/result @@ -572,7 +572,7 @@ async def list_tools() -> list[Tool]: Tool( name="generate_text", description="Generate text using sampling", - inputSchema={"type": "object"}, + input_schema={"type": "object"}, ) ] @@ -658,8 +658,8 @@ async def list_tools() -> list[Tool]: Tool( name="generate_text", description="Generate text using sampling", - inputSchema={"type": "object"}, - execution=ToolExecution(taskSupport=TASK_REQUIRED), + input_schema={"type": "object"}, + execution=ToolExecution(task_support=TASK_REQUIRED), ) ] @@ -710,7 +710,7 @@ async def run_client() -> None: # Call tool as task create_result = await client_session.experimental.call_tool_as_task("generate_text", {}) 
- task_id = create_result.task.taskId + task_id = create_result.task.task_id assert create_result.task.status == "working" # Poll until input_required or terminal diff --git a/tests/experimental/tasks/test_request_context.py b/tests/experimental/tasks/test_request_context.py index 5fa5da81af..0c342d8340 100644 --- a/tests/experimental/tasks/test_request_context.py +++ b/tests/experimental/tasks/test_request_context.py @@ -108,8 +108,8 @@ def test_validate_for_tool_with_execution_required() -> None: tool = Tool( name="test", description="test", - inputSchema={"type": "object"}, - execution=ToolExecution(taskSupport=TASK_REQUIRED), + input_schema={"type": "object"}, + execution=ToolExecution(task_support=TASK_REQUIRED), ) error = exp.validate_for_tool(tool, raise_error=False) assert error is not None @@ -121,7 +121,7 @@ def test_validate_for_tool_without_execution() -> None: tool = Tool( name="test", description="test", - inputSchema={"type": "object"}, + input_schema={"type": "object"}, execution=None, ) error = exp.validate_for_tool(tool, raise_error=False) @@ -134,8 +134,8 @@ def test_validate_for_tool_optional_with_task() -> None: tool = Tool( name="test", description="test", - inputSchema={"type": "object"}, - execution=ToolExecution(taskSupport=TASK_OPTIONAL), + input_schema={"type": "object"}, + execution=ToolExecution(task_support=TASK_OPTIONAL), ) error = exp.validate_for_tool(tool, raise_error=False) assert error is None diff --git a/tests/experimental/tasks/test_spec_compliance.py b/tests/experimental/tasks/test_spec_compliance.py index 842bfa7e1f..36ffc50d3d 100644 --- a/tests/experimental/tasks/test_spec_compliance.py +++ b/tests/experimental/tasks/test_spec_compliance.py @@ -346,10 +346,10 @@ def test_model_immediate_response_in_meta(self) -> None: # CreateTaskResult can include model-immediate-response in _meta task = Task( - taskId="test-123", + task_id="test-123", status="working", - createdAt=TEST_DATETIME, - lastUpdatedAt=TEST_DATETIME, + created_at=TEST_DATETIME, + last_updated_at=TEST_DATETIME, ttl=60000, ) immediate_msg = "Task started, processing your request..." 
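# --- Editor's illustrative aside (not part of the patch) ----------------------
# A minimal sketch of the aliasing pattern this rename relies on, under the
# assumption that the SDK's models are Pydantic v2 with a camelCase alias
# generator -- the `model_dump(by_alias=True)` calls in the hunks above point
# that way, but this is not the SDK's actual base class. `ExampleTask` and its
# fields are hypothetical names used only to show that Python code reads
# snake_case while the serialized protocol keeps the camelCase names.
from pydantic import BaseModel, ConfigDict
from pydantic.alias_generators import to_camel


class ExampleTask(BaseModel):
    # snake_case attributes in Python; camelCase aliases on the wire
    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)

    task_id: str
    status_message: str | None = None


task = ExampleTask(task_id="test-123", status_message="working")
assert task.task_id == "test-123"  # attribute access uses the new snake_case name
assert task.model_dump(by_alias=True) == {
    "taskId": "test-123",  # JSON output keeps the camelCase protocol field names
    "statusMessage": "working",
}
# ------------------------------------------------------------------------------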
diff --git a/tests/issues/test_1027_win_unreachable_cleanup.py b/tests/issues/test_1027_win_unreachable_cleanup.py index dae44655ef..43044e4a98 100644 --- a/tests/issues/test_1027_win_unreachable_cleanup.py +++ b/tests/issues/test_1027_win_unreachable_cleanup.py @@ -89,7 +89,7 @@ def echo(text: str) -> str: async with ClientSession(read, write) as session: # Initialize the session result = await session.initialize() - assert result.protocolVersion in ["2024-11-05", "2025-06-18", "2025-11-25"] + assert result.protocol_version in ["2024-11-05", "2025-06-18", "2025-11-25"] # Verify startup marker was created assert Path(startup_marker).exists(), "Server startup marker not created" diff --git a/tests/issues/test_129_resource_templates.py b/tests/issues/test_129_resource_templates.py index 958773d127..1ebff7c92f 100644 --- a/tests/issues/test_129_resource_templates.py +++ b/tests/issues/test_129_resource_templates.py @@ -27,16 +27,16 @@ def get_user_profile(user_id: str) -> str: # pragma: no cover types.ListResourceTemplatesRequest(params=None) ) assert isinstance(result.root, types.ListResourceTemplatesResult) - templates = result.root.resourceTemplates + templates = result.root.resource_templates # Verify we get both templates back assert len(templates) == 2 # Verify template details greeting_template = next(t for t in templates if t.name == "get_greeting") # pragma: no cover - assert greeting_template.uriTemplate == "greeting://{name}" + assert greeting_template.uri_template == "greeting://{name}" assert greeting_template.description == "Get a personalized greeting" profile_template = next(t for t in templates if t.name == "get_user_profile") # pragma: no cover - assert profile_template.uriTemplate == "users://{user_id}/profile" + assert profile_template.uri_template == "users://{user_id}/profile" assert profile_template.description == "Dynamic user data" diff --git a/tests/issues/test_1338_icons_and_metadata.py b/tests/issues/test_1338_icons_and_metadata.py index adc37f1c6e..41df47ee4f 100644 --- a/tests/issues/test_1338_icons_and_metadata.py +++ b/tests/issues/test_1338_icons_and_metadata.py @@ -14,7 +14,7 @@ async def test_icons_and_website_url(): # Create test icon test_icon = Icon( src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==", - mimeType="image/png", + mime_type="image/png", sizes=["1x1"], ) @@ -51,7 +51,7 @@ def test_resource_template(city: str) -> str: # pragma: no cover assert mcp.icons is not None assert len(mcp.icons) == 1 assert mcp.icons[0].src == test_icon.src - assert mcp.icons[0].mimeType == test_icon.mimeType + assert mcp.icons[0].mime_type == test_icon.mime_type assert mcp.icons[0].sizes == test_icon.sizes # Test tool includes icon @@ -86,7 +86,7 @@ def test_resource_template(city: str) -> str: # pragma: no cover assert len(templates) == 1 template = templates[0] assert template.name == "test_resource_template" - assert template.uriTemplate == "test://weather/{city}" + assert template.uri_template == "test://weather/{city}" assert template.icons is not None assert len(template.icons) == 1 assert template.icons[0].src == test_icon.src @@ -96,9 +96,9 @@ async def test_multiple_icons(): """Test that multiple icons can be added to tools, resources, and prompts.""" # Create multiple test icons - icon1 = Icon(src="data:image/png;base64,icon1", mimeType="image/png", sizes=["16x16"]) - icon2 = Icon(src="data:image/png;base64,icon2", mimeType="image/png", sizes=["32x32"]) - icon3 = 
Icon(src="data:image/png;base64,icon3", mimeType="image/png", sizes=["64x64"]) + icon1 = Icon(src="data:image/png;base64,icon1", mime_type="image/png", sizes=["16x16"]) + icon2 = Icon(src="data:image/png;base64,icon2", mime_type="image/png", sizes=["32x32"]) + icon3 = Icon(src="data:image/png;base64,icon3", mime_type="image/png", sizes=["64x64"]) mcp = FastMCP("MultiIconServer") diff --git a/tests/issues/test_141_resource_templates.py b/tests/issues/test_141_resource_templates.py index 0a0484d894..2300f7f736 100644 --- a/tests/issues/test_141_resource_templates.py +++ b/tests/issues/test_141_resource_templates.py @@ -85,10 +85,10 @@ def get_user_profile(user_id: str) -> str: # List available resources resources = await session.list_resource_templates() assert isinstance(resources, ListResourceTemplatesResult) - assert len(resources.resourceTemplates) == 2 + assert len(resources.resource_templates) == 2 # Verify resource templates are listed correctly - templates = [r.uriTemplate for r in resources.resourceTemplates] + templates = [r.uri_template for r in resources.resource_templates] assert "resource://users/{user_id}/posts/{post_id}" in templates assert "resource://users/{user_id}/profile" in templates @@ -97,14 +97,14 @@ def get_user_profile(user_id: str) -> str: contents = result.contents[0] assert isinstance(contents, TextResourceContents) assert contents.text == "Post 456 by user 123" - assert contents.mimeType == "text/plain" + assert contents.mime_type == "text/plain" # Read another resource with valid parameters result = await session.read_resource(AnyUrl("resource://users/789/profile")) contents = result.contents[0] assert isinstance(contents, TextResourceContents) assert contents.text == "Profile for user 789" - assert contents.mimeType == "text/plain" + assert contents.mime_type == "text/plain" # Verify invalid resource URIs raise appropriate errors with pytest.raises(Exception): # Specific exception type may vary diff --git a/tests/issues/test_152_resource_mime_type.py b/tests/issues/test_152_resource_mime_type.py index ea411ea616..07c129aadf 100644 --- a/tests/issues/test_152_resource_mime_type.py +++ b/tests/issues/test_152_resource_mime_type.py @@ -45,19 +45,19 @@ def get_image_as_bytes() -> bytes: bytes_resource = mapping["test://image_bytes"] # Verify mime types - assert string_resource.mimeType == "image/png", "String resource mime type not respected" - assert bytes_resource.mimeType == "image/png", "Bytes resource mime type not respected" + assert string_resource.mime_type == "image/png", "String resource mime type not respected" + assert bytes_resource.mime_type == "image/png", "Bytes resource mime type not respected" # Also verify the content can be read correctly string_result = await client.read_resource(AnyUrl("test://image")) assert len(string_result.contents) == 1 assert getattr(string_result.contents[0], "text") == base64_string, "Base64 string mismatch" - assert string_result.contents[0].mimeType == "image/png", "String content mime type not preserved" + assert string_result.contents[0].mime_type == "image/png", "String content mime type not preserved" bytes_result = await client.read_resource(AnyUrl("test://image_bytes")) assert len(bytes_result.contents) == 1 assert base64.b64decode(getattr(bytes_result.contents[0], "blob")) == image_bytes, "Bytes mismatch" - assert bytes_result.contents[0].mimeType == "image/png", "Bytes content mime type not preserved" + assert bytes_result.contents[0].mime_type == "image/png", "Bytes content mime type not preserved" async 
def test_lowlevel_resource_mime_type(): @@ -70,11 +70,11 @@ async def test_lowlevel_resource_mime_type(): # Create test resources with specific mime types test_resources = [ - types.Resource(uri="test://image", name="test image", mimeType="image/png"), + types.Resource(uri="test://image", name="test image", mime_type="image/png"), types.Resource( uri="test://image_bytes", name="test image bytes", - mimeType="image/png", + mime_type="image/png", ), ] @@ -103,16 +103,16 @@ async def handle_read_resource(uri: str): bytes_resource = mapping["test://image_bytes"] # Verify mime types - assert string_resource.mimeType == "image/png", "String resource mime type not respected" - assert bytes_resource.mimeType == "image/png", "Bytes resource mime type not respected" + assert string_resource.mime_type == "image/png", "String resource mime type not respected" + assert bytes_resource.mime_type == "image/png", "Bytes resource mime type not respected" # Also verify the content can be read correctly string_result = await client.read_resource(AnyUrl("test://image")) assert len(string_result.contents) == 1 assert getattr(string_result.contents[0], "text") == base64_string, "Base64 string mismatch" - assert string_result.contents[0].mimeType == "image/png", "String content mime type not preserved" + assert string_result.contents[0].mime_type == "image/png", "String content mime type not preserved" bytes_result = await client.read_resource(AnyUrl("test://image_bytes")) assert len(bytes_result.contents) == 1 assert base64.b64decode(getattr(bytes_result.contents[0], "blob")) == image_bytes, "Bytes mismatch" - assert bytes_result.contents[0].mimeType == "image/png", "Bytes content mime type not preserved" + assert bytes_result.contents[0].mime_type == "image/png", "Bytes content mime type not preserved" diff --git a/tests/issues/test_1574_resource_uri_validation.py b/tests/issues/test_1574_resource_uri_validation.py index 10cb558262..c936af09f9 100644 --- a/tests/issues/test_1574_resource_uri_validation.py +++ b/tests/issues/test_1574_resource_uri_validation.py @@ -125,7 +125,7 @@ def test_resource_contents_uri_json_roundtrip(): contents = types.TextResourceContents( uri=uri_str, text="data", - mimeType="text/plain", + mime_type="text/plain", ) json_data = contents.model_dump(mode="json") restored = types.TextResourceContents.model_validate(json_data) diff --git a/tests/issues/test_1754_mime_type_parameters.py b/tests/issues/test_1754_mime_type_parameters.py index cd8239ad2a..0260a5b691 100644 --- a/tests/issues/test_1754_mime_type_parameters.py +++ b/tests/issues/test_1754_mime_type_parameters.py @@ -26,7 +26,7 @@ def widget() -> str: resources = await mcp.list_resources() assert len(resources) == 1 - assert resources[0].mimeType == "text/html;profile=mcp-app" + assert resources[0].mime_type == "text/html;profile=mcp-app" async def test_mime_type_with_parameters_and_space(): @@ -39,7 +39,7 @@ def data() -> str: resources = await mcp.list_resources() assert len(resources) == 1 - assert resources[0].mimeType == "application/json; charset=utf-8" + assert resources[0].mime_type == "application/json; charset=utf-8" async def test_mime_type_with_multiple_parameters(): @@ -52,7 +52,7 @@ def data() -> str: resources = await mcp.list_resources() assert len(resources) == 1 - assert resources[0].mimeType == "text/plain; charset=utf-8; format=fixed" + assert resources[0].mime_type == "text/plain; charset=utf-8; format=fixed" async def test_mime_type_preserved_in_read_resource(): @@ -67,4 +67,4 @@ def my_widget() -> str: # 
Read the resource result = await client.read_resource(AnyUrl("ui://my-widget")) assert len(result.contents) == 1 - assert result.contents[0].mimeType == "text/html;profile=mcp-app" + assert result.contents[0].mime_type == "text/html;profile=mcp-app" diff --git a/tests/issues/test_176_progress_token.py b/tests/issues/test_176_progress_token.py index eb5f19d64c..07c3ce3976 100644 --- a/tests/issues/test_176_progress_token.py +++ b/tests/issues/test_176_progress_token.py @@ -17,7 +17,7 @@ async def test_progress_token_zero_first_call(): # Create request context with progress token 0 mock_meta = MagicMock() - mock_meta.progressToken = 0 # This is the key test case - token is 0 + mock_meta.progress_token = 0 # This is the key test case - token is 0 request_context = RequestContext( request_id="test-request", diff --git a/tests/issues/test_88_random_error.py b/tests/issues/test_88_random_error.py index ac370ca160..a29231d77d 100644 --- a/tests/issues/test_88_random_error.py +++ b/tests/issues/test_88_random_error.py @@ -42,12 +42,12 @@ async def list_tools() -> list[types.Tool]: types.Tool( name="slow", description="A slow tool", - inputSchema={"type": "object"}, + input_schema={"type": "object"}, ), types.Tool( name="fast", description="A fast tool", - inputSchema={"type": "object"}, + input_schema={"type": "object"}, ), ] diff --git a/tests/server/fastmcp/prompts/test_base.py b/tests/server/fastmcp/prompts/test_base.py index 84c9712681..afc1ec6ea8 100644 --- a/tests/server/fastmcp/prompts/test_base.py +++ b/tests/server/fastmcp/prompts/test_base.py @@ -96,7 +96,7 @@ async def fn() -> UserMessage: resource=TextResourceContents( uri="file://file.txt", text="File contents", - mimeType="text/plain", + mime_type="text/plain", ), ) ) @@ -109,7 +109,7 @@ async def fn() -> UserMessage: resource=TextResourceContents( uri="file://file.txt", text="File contents", - mimeType="text/plain", + mime_type="text/plain", ), ) ) @@ -128,7 +128,7 @@ async def fn() -> list[Message]: resource=TextResourceContents( uri="file://file.txt", text="File contents", - mimeType="text/plain", + mime_type="text/plain", ), ) ), @@ -144,7 +144,7 @@ async def fn() -> list[Message]: resource=TextResourceContents( uri="file://file.txt", text="File contents", - mimeType="text/plain", + mime_type="text/plain", ), ) ), @@ -176,7 +176,7 @@ async def fn() -> dict[str, Any]: resource=TextResourceContents( uri="file://file.txt", text="File contents", - mimeType="text/plain", + mime_type="text/plain", ), ) ) diff --git a/tests/server/fastmcp/test_elicitation.py b/tests/server/fastmcp/test_elicitation.py index 597b291785..4ba5ac0007 100644 --- a/tests/server/fastmcp/test_elicitation.py +++ b/tests/server/fastmcp/test_elicitation.py @@ -290,7 +290,7 @@ async def defaults_tool(ctx: Context[ServerSession, None]) -> str: async def callback_schema_verify(context: RequestContext[ClientSession, None], params: ElicitRequestParams): # Verify the schema includes defaults assert isinstance(params, types.ElicitRequestFormParams), "Expected form mode elicitation" - schema = params.requestedSchema + schema = params.requested_schema props = schema["properties"] assert props["name"]["default"] == "Guest" diff --git a/tests/server/fastmcp/test_func_metadata.py b/tests/server/fastmcp/test_func_metadata.py index 61e524290e..d28726b5a9 100644 --- a/tests/server/fastmcp/test_func_metadata.py +++ b/tests/server/fastmcp/test_func_metadata.py @@ -850,7 +850,7 @@ class PersonClass(BaseModel): name: str def func_returning_annotated_tool_call_result() -> 
Annotated[CallToolResult, PersonClass]: # pragma: no cover - return CallToolResult(content=[], structuredContent={"name": "Brandon"}) + return CallToolResult(content=[], structured_content={"name": "Brandon"}) meta = func_metadata(func_returning_annotated_tool_call_result) @@ -870,7 +870,7 @@ class PersonClass(BaseModel): name: str def func_returning_annotated_tool_call_result() -> Annotated[CallToolResult, PersonClass]: # pragma: no cover - return CallToolResult(content=[], structuredContent={"person": "Brandon"}) + return CallToolResult(content=[], structured_content={"person": "Brandon"}) meta = func_metadata(func_returning_annotated_tool_call_result) diff --git a/tests/server/fastmcp/test_integration.py b/tests/server/fastmcp/test_integration.py index 70948bd7e2..4ebb28780f 100644 --- a/tests/server/fastmcp/test_integration.py +++ b/tests/server/fastmcp/test_integration.py @@ -258,7 +258,7 @@ async def test_basic_tools(server_transport: str, server_url: str) -> None: # Test initialization result = await session.initialize() assert isinstance(result, InitializeResult) - assert result.serverInfo.name == "Tool Example" + assert result.server_info.name == "Tool Example" assert result.capabilities.tools is not None # Test sum tool @@ -295,7 +295,7 @@ async def test_basic_resources(server_transport: str, server_url: str) -> None: # Test initialization result = await session.initialize() assert isinstance(result, InitializeResult) - assert result.serverInfo.name == "Resource Example" + assert result.server_info.name == "Resource Example" assert result.capabilities.resources is not None # Test document resource @@ -336,7 +336,7 @@ async def test_basic_prompts(server_transport: str, server_url: str) -> None: # Test initialization result = await session.initialize() assert isinstance(result, InitializeResult) - assert result.serverInfo.name == "Prompt Example" + assert result.server_info.name == "Prompt Example" assert result.capabilities.prompts is not None # Test review_code prompt @@ -396,7 +396,7 @@ async def message_handler(message: RequestResponder[ServerRequest, ClientResult] # Test initialization result = await session.initialize() assert isinstance(result, InitializeResult) - assert result.serverInfo.name == "Progress Example" + assert result.server_info.name == "Progress Example" # Test progress callback progress_updates = [] @@ -449,7 +449,7 @@ async def test_sampling(server_transport: str, server_url: str) -> None: # Test initialization result = await session.initialize() assert isinstance(result, InitializeResult) - assert result.serverInfo.name == "Sampling Example" + assert result.server_info.name == "Sampling Example" assert result.capabilities.tools is not None # Test sampling tool @@ -480,7 +480,7 @@ async def test_elicitation(server_transport: str, server_url: str) -> None: # Test initialization result = await session.initialize() assert isinstance(result, InitializeResult) - assert result.serverInfo.name == "Elicitation Example" + assert result.server_info.name == "Elicitation Example" # Test booking with unavailable date (triggers elicitation) booking_result = await session.call_tool( @@ -537,7 +537,7 @@ async def message_handler(message: RequestResponder[ServerRequest, ClientResult] # Test initialization result = await session.initialize() assert isinstance(result, InitializeResult) - assert result.serverInfo.name == "Notifications Example" + assert result.server_info.name == "Notifications Example" # Call tool that generates notifications tool_result = await 
session.call_tool("process_data", {"data": "test_data"}) @@ -578,7 +578,7 @@ async def test_completion(server_transport: str, server_url: str) -> None: # Test initialization result = await session.initialize() assert isinstance(result, InitializeResult) - assert result.serverInfo.name == "Example" + assert result.server_info.name == "Example" assert result.capabilities.resources is not None assert result.capabilities.prompts is not None @@ -635,7 +635,7 @@ async def test_fastmcp_quickstart(server_transport: str, server_url: str) -> Non # Test initialization result = await session.initialize() assert isinstance(result, InitializeResult) - assert result.serverInfo.name == "Demo" + assert result.server_info.name == "Demo" # Test add tool tool_result = await session.call_tool("add", {"a": 10, "b": 20}) @@ -673,7 +673,7 @@ async def test_structured_output(server_transport: str, server_url: str) -> None # Test initialization result = await session.initialize() assert isinstance(result, InitializeResult) - assert result.serverInfo.name == "Structured Output Example" + assert result.server_info.name == "Structured Output Example" # Test get_weather tool weather_result = await session.call_tool("get_weather", {"city": "New York"}) diff --git a/tests/server/fastmcp/test_parameter_descriptions.py b/tests/server/fastmcp/test_parameter_descriptions.py index 9f2386894c..340ca71603 100644 --- a/tests/server/fastmcp/test_parameter_descriptions.py +++ b/tests/server/fastmcp/test_parameter_descriptions.py @@ -23,7 +23,7 @@ def greet( tool = tools[0] # Check that parameter descriptions are present in the schema - properties = tool.inputSchema["properties"] + properties = tool.input_schema["properties"] assert "name" in properties assert properties["name"]["description"] == "The name to greet" assert "title" in properties diff --git a/tests/server/fastmcp/test_server.py b/tests/server/fastmcp/test_server.py index 87637fcb8a..f271bcd713 100644 --- a/tests/server/fastmcp/test_server.py +++ b/tests/server/fastmcp/test_server.py @@ -36,7 +36,7 @@ async def test_create_server(self): instructions="Server instructions", website_url="https://example.com/mcp_server", version="1.0", - icons=[Icon(src="https://example.com/icon.png", mimeType="image/png", sizes=["48x48", "96x96"])], + icons=[Icon(src="https://example.com/icon.png", mime_type="image/png", sizes=["48x48", "96x96"])], ) assert mcp.name == "FastMCP" assert mcp.title == "FastMCP Server" @@ -197,8 +197,8 @@ def audio_tool_fn(path: str) -> Audio: def mixed_content_tool_fn() -> list[ContentBlock]: return [ TextContent(type="text", text="Hello"), - ImageContent(type="image", data="abc", mimeType="image/png"), - AudioContent(type="audio", data="def", mimeType="audio/wav"), + ImageContent(type="image", data="abc", mime_type="image/png"), + AudioContent(type="audio", data="def", mime_type="audio/wav"), ] @@ -237,7 +237,7 @@ async def test_tool_exception_handling(self): content = result.content[0] assert isinstance(content, TextContent) assert "Test error" in content.text - assert result.isError is True + assert result.is_error is True @pytest.mark.anyio async def test_tool_error_handling(self): @@ -249,7 +249,7 @@ async def test_tool_error_handling(self): content = result.content[0] assert isinstance(content, TextContent) assert "Test error" in content.text - assert result.isError is True + assert result.is_error is True @pytest.mark.anyio async def test_tool_error_details(self): @@ -262,7 +262,7 @@ async def test_tool_error_details(self): assert isinstance(content, 
TextContent) assert isinstance(content.text, str) assert "Test error" in content.text - assert result.isError is True + assert result.is_error is True @pytest.mark.anyio async def test_tool_return_value_conversion(self): @@ -275,8 +275,8 @@ async def test_tool_return_value_conversion(self): assert isinstance(content, TextContent) assert content.text == "3" # Check structured content - int return type should have structured output - assert result.structuredContent is not None - assert result.structuredContent == {"result": 3} + assert result.structured_content is not None + assert result.structured_content == {"result": 3} @pytest.mark.anyio async def test_tool_image_helper(self, tmp_path: Path): @@ -292,12 +292,12 @@ async def test_tool_image_helper(self, tmp_path: Path): content = result.content[0] assert isinstance(content, ImageContent) assert content.type == "image" - assert content.mimeType == "image/png" + assert content.mime_type == "image/png" # Verify base64 encoding decoded = base64.b64decode(content.data) assert decoded == b"fake png data" # Check structured content - Image return type should NOT have structured output - assert result.structuredContent is None + assert result.structured_content is None @pytest.mark.anyio async def test_tool_audio_helper(self, tmp_path: Path): @@ -313,12 +313,12 @@ async def test_tool_audio_helper(self, tmp_path: Path): content = result.content[0] assert isinstance(content, AudioContent) assert content.type == "audio" - assert content.mimeType == "audio/wav" + assert content.mime_type == "audio/wav" # Verify base64 encoding decoded = base64.b64decode(content.data) assert decoded == b"fake wav data" # Check structured content - Image return type should NOT have structured output - assert result.structuredContent is None + assert result.structured_content is None @pytest.mark.parametrize( "filename,expected_mime_type", @@ -348,7 +348,7 @@ async def test_tool_audio_suffix_detection(self, tmp_path: Path, filename: str, content = result.content[0] assert isinstance(content, AudioContent) assert content.type == "audio" - assert content.mimeType == expected_mime_type + assert content.mime_type == expected_mime_type # Verify base64 encoding decoded = base64.b64decode(content.data) assert decoded == b"fake audio data" @@ -364,14 +364,14 @@ async def test_tool_mixed_content(self): assert isinstance(content1, TextContent) assert content1.text == "Hello" assert isinstance(content2, ImageContent) - assert content2.mimeType == "image/png" + assert content2.mime_type == "image/png" assert content2.data == "abc" assert isinstance(content3, AudioContent) - assert content3.mimeType == "audio/wav" + assert content3.mime_type == "audio/wav" assert content3.data == "def" - assert result.structuredContent is not None - assert "result" in result.structuredContent - structured_result = result.structuredContent["result"] + assert result.structured_content is not None + assert "result" in result.structured_content + structured_result = result.structured_content["result"] assert len(structured_result) == 3 expected_content = [ @@ -419,12 +419,12 @@ def mixed_list_fn() -> list: # type: ignore # Check image conversion content2 = result.content[1] assert isinstance(content2, ImageContent) - assert content2.mimeType == "image/png" + assert content2.mime_type == "image/png" assert base64.b64decode(content2.data) == b"test image data" # Check audio conversion content3 = result.content[2] assert isinstance(content3, AudioContent) - assert content3.mimeType == "audio/wav" + assert 
content3.mime_type == "audio/wav" assert base64.b64decode(content3.data) == b"test audio data" # Check dict conversion content4 = result.content[3] @@ -435,7 +435,7 @@ def mixed_list_fn() -> list: # type: ignore assert isinstance(content5, TextContent) assert content5.text == "direct content" # Check structured content - untyped list with Image objects should NOT have structured output - assert result.structuredContent is None + assert result.structured_content is None @pytest.mark.anyio async def test_tool_structured_output_basemodel(self): @@ -457,16 +457,16 @@ def get_user(user_id: int) -> UserOutput: # Check that the tool has outputSchema tools = await client.list_tools() tool = next(t for t in tools.tools if t.name == "get_user") - assert tool.outputSchema is not None - assert tool.outputSchema["type"] == "object" - assert "name" in tool.outputSchema["properties"] - assert "age" in tool.outputSchema["properties"] + assert tool.output_schema is not None + assert tool.output_schema["type"] == "object" + assert "name" in tool.output_schema["properties"] + assert "age" in tool.output_schema["properties"] # Call the tool and check structured output result = await client.call_tool("get_user", {"user_id": 123}) - assert result.isError is False - assert result.structuredContent is not None - assert result.structuredContent == {"name": "John Doe", "age": 30, "active": True} + assert result.is_error is False + assert result.structured_content is not None + assert result.structured_content == {"name": "John Doe", "age": 30, "active": True} # Content should be JSON serialized version assert len(result.content) == 1 assert isinstance(result.content[0], TextContent) @@ -487,17 +487,17 @@ def calculate_sum(a: int, b: int) -> int: # Check that the tool has outputSchema tools = await client.list_tools() tool = next(t for t in tools.tools if t.name == "calculate_sum") - assert tool.outputSchema is not None + assert tool.output_schema is not None # Primitive types are wrapped - assert tool.outputSchema["type"] == "object" - assert "result" in tool.outputSchema["properties"] - assert tool.outputSchema["properties"]["result"]["type"] == "integer" + assert tool.output_schema["type"] == "object" + assert "result" in tool.output_schema["properties"] + assert tool.output_schema["properties"]["result"]["type"] == "integer" # Call the tool result = await client.call_tool("calculate_sum", {"a": 5, "b": 7}) - assert result.isError is False - assert result.structuredContent is not None - assert result.structuredContent == {"result": 12} + assert result.is_error is False + assert result.structured_content is not None + assert result.structured_content == {"result": 12} @pytest.mark.anyio async def test_tool_structured_output_list(self): @@ -512,9 +512,9 @@ def get_numbers() -> list[int]: async with client_session(mcp._mcp_server) as client: result = await client.call_tool("get_numbers", {}) - assert result.isError is False - assert result.structuredContent is not None - assert result.structuredContent == {"result": [1, 2, 3, 4, 5]} + assert result.is_error is False + assert result.structured_content is not None + assert result.structured_content == {"result": [1, 2, 3, 4, 5]} @pytest.mark.anyio async def test_tool_structured_output_server_side_validation_error(self): @@ -528,8 +528,8 @@ def get_numbers() -> list[int]: async with client_session(mcp._mcp_server) as client: result = await client.call_tool("get_numbers", {}) - assert result.isError is True - assert result.structuredContent is None + assert 
result.is_error is True + assert result.structured_content is None assert len(result.content) == 1 assert isinstance(result.content[0], TextContent) @@ -554,17 +554,17 @@ def get_metadata() -> dict[str, Any]: # Check schema tools = await client.list_tools() tool = next(t for t in tools.tools if t.name == "get_metadata") - assert tool.outputSchema is not None - assert tool.outputSchema["type"] == "object" + assert tool.output_schema is not None + assert tool.output_schema["type"] == "object" # dict[str, Any] should have minimal schema assert ( - "additionalProperties" not in tool.outputSchema or tool.outputSchema.get("additionalProperties") is True + "additionalProperties" not in tool.output_schema or tool.output_schema.get("additionalProperties") is True ) # Call tool result = await client.call_tool("get_metadata", {}) - assert result.isError is False - assert result.structuredContent is not None + assert result.is_error is False + assert result.structured_content is not None expected = { "version": "1.0.0", "enabled": True, @@ -572,7 +572,7 @@ def get_metadata() -> dict[str, Any]: "tags": ["production", "stable"], "config": {"nested": {"value": 123}}, } - assert result.structuredContent == expected + assert result.structured_content == expected @pytest.mark.anyio async def test_tool_structured_output_dict_str_typed(self): @@ -589,14 +589,14 @@ def get_settings() -> dict[str, str]: # Check schema tools = await client.list_tools() tool = next(t for t in tools.tools if t.name == "get_settings") - assert tool.outputSchema is not None - assert tool.outputSchema["type"] == "object" - assert tool.outputSchema["additionalProperties"]["type"] == "string" + assert tool.output_schema is not None + assert tool.output_schema["type"] == "object" + assert tool.output_schema["additionalProperties"]["type"] == "string" # Call tool result = await client.call_tool("get_settings", {}) - assert result.isError is False - assert result.structuredContent == {"theme": "dark", "language": "en", "timezone": "UTC"} + assert result.is_error is False + assert result.structured_content == {"theme": "dark", "language": "en", "timezone": "UTC"} @pytest.mark.anyio async def test_remove_tool(self): @@ -656,7 +656,7 @@ async def test_remove_tool_and_call(self): # Verify tool works before removal async with client_session(mcp._mcp_server) as client: result = await client.call_tool("tool_fn", {"x": 1, "y": 2}) - assert not result.isError + assert not result.is_error content = result.content[0] assert isinstance(content, TextContent) assert content.text == "3" @@ -667,7 +667,7 @@ async def test_remove_tool_and_call(self): # Verify calling removed tool returns an error async with client_session(mcp._mcp_server) as client: result = await client.call_tool("tool_fn", {"x": 1, "y": 2}) - assert result.isError + assert result.is_error content = result.content[0] assert isinstance(content, TextContent) assert "Unknown tool" in content.text @@ -762,7 +762,7 @@ def get_data() -> str: # pragma: no cover assert resource.description == "get_data returns a string" assert resource.uri == "function://test" assert resource.name == "test_get_data" - assert resource.mimeType == "text/plain" + assert resource.mime_type == "text/plain" class TestServerResourceTemplates: @@ -892,8 +892,8 @@ def get_csv(user: str) -> str: assert len(templates) == 1 template = templates[0] - assert hasattr(template, "mimeType") - assert template.mimeType == "text/csv" + assert hasattr(template, "mime_type") + assert template.mime_type == "text/csv" async with 
client_session(mcp._mcp_server) as client: result = await client.read_resource("resource://bob/csv") @@ -1382,7 +1382,7 @@ def fn() -> Message: resource=TextResourceContents( uri="file://file.txt", text="File contents", - mimeType="text/plain", + mime_type="text/plain", ), ) ) @@ -1397,7 +1397,7 @@ def fn() -> Message: resource = content.resource assert isinstance(resource, TextResourceContents) assert resource.text == "File contents" - assert resource.mimeType == "text/plain" + assert resource.mime_type == "text/plain" @pytest.mark.anyio async def test_get_unknown_prompt(self): diff --git a/tests/server/fastmcp/test_title.py b/tests/server/fastmcp/test_title.py index da9443eb40..7986db08c6 100644 --- a/tests/server/fastmcp/test_title.py +++ b/tests/server/fastmcp/test_title.py @@ -26,10 +26,10 @@ async def test_server_name_title_description_version(): # Start server and connect client async with create_connected_server_and_client_session(mcp._mcp_server) as client: init_result = await client.initialize() - assert init_result.serverInfo.name == "TestServer" - assert init_result.serverInfo.title == "Test Server Title" - assert init_result.serverInfo.description == "This is a test server description." - assert init_result.serverInfo.version == "1.0" + assert init_result.server_info.name == "TestServer" + assert init_result.server_info.title == "Test Server Title" + assert init_result.server_info.description == "This is a test server description." + assert init_result.server_info.version == "1.0" @pytest.mark.anyio @@ -184,7 +184,7 @@ def titled_dynamic_resource(id: str) -> str: # pragma: no cover # List resource templates templates_result = await client.list_resource_templates() - templates = {tpl.uriTemplate: tpl for tpl in templates_result.resourceTemplates} + templates = {tpl.uri_template: tpl for tpl in templates_result.resource_templates} # Verify dynamic resource template assert "resource://dynamic/{id}" in templates @@ -203,17 +203,17 @@ async def test_get_display_name_utility(): """Test the get_display_name utility function.""" # Test tool precedence: title > annotations.title > name - tool_name_only = Tool(name="test_tool", inputSchema={}) + tool_name_only = Tool(name="test_tool", input_schema={}) assert get_display_name(tool_name_only) == "test_tool" - tool_with_title = Tool(name="test_tool", title="Test Tool", inputSchema={}) + tool_with_title = Tool(name="test_tool", title="Test Tool", input_schema={}) assert get_display_name(tool_with_title) == "Test Tool" - tool_with_annotations = Tool(name="test_tool", inputSchema={}, annotations=ToolAnnotations(title="Annotated Tool")) + tool_with_annotations = Tool(name="test_tool", input_schema={}, annotations=ToolAnnotations(title="Annotated Tool")) assert get_display_name(tool_with_annotations) == "Annotated Tool" tool_with_both = Tool( - name="test_tool", title="Primary Title", inputSchema={}, annotations=ToolAnnotations(title="Secondary Title") + name="test_tool", title="Primary Title", input_schema={}, annotations=ToolAnnotations(title="Secondary Title") ) assert get_display_name(tool_with_both) == "Primary Title" @@ -230,8 +230,8 @@ async def test_get_display_name_utility(): prompt_with_title = Prompt(name="test_prompt", title="Test Prompt") assert get_display_name(prompt_with_title) == "Test Prompt" - template = ResourceTemplate(uriTemplate="file://{id}", name="test_template") + template = ResourceTemplate(uri_template="file://{id}", name="test_template") assert get_display_name(template) == "test_template" - template_with_title = 
ResourceTemplate(uriTemplate="file://{id}", name="test_template", title="Test Template") + template_with_title = ResourceTemplate(uri_template="file://{id}", name="test_template", title="Test Template") assert get_display_name(template_with_title) == "Test Template" diff --git a/tests/server/fastmcp/test_tool_manager.py b/tests/server/fastmcp/test_tool_manager.py index d83d484744..b09ae7de15 100644 --- a/tests/server/fastmcp/test_tool_manager.py +++ b/tests/server/fastmcp/test_tool_manager.py @@ -426,8 +426,8 @@ def read_data(path: str) -> str: # pragma: no cover annotations = ToolAnnotations( title="File Reader", - readOnlyHint=True, - openWorldHint=False, + read_only_hint=True, + open_world_hint=False, ) manager = ToolManager() @@ -435,8 +435,8 @@ def read_data(path: str) -> str: # pragma: no cover assert tool.annotations is not None assert tool.annotations.title == "File Reader" - assert tool.annotations.readOnlyHint is True - assert tool.annotations.openWorldHint is False + assert tool.annotations.read_only_hint is True + assert tool.annotations.open_world_hint is False @pytest.mark.anyio async def test_tool_annotations_in_fastmcp(self): @@ -444,7 +444,7 @@ async def test_tool_annotations_in_fastmcp(self): app = FastMCP() - @app.tool(annotations=ToolAnnotations(title="Echo Tool", readOnlyHint=True)) + @app.tool(annotations=ToolAnnotations(title="Echo Tool", read_only_hint=True)) def echo(message: str) -> str: # pragma: no cover """Echo a message back.""" return message @@ -453,7 +453,7 @@ def echo(message: str) -> str: # pragma: no cover assert len(tools) == 1 assert tools[0].annotations is not None assert tools[0].annotations.title == "Echo Tool" - assert tools[0].annotations.readOnlyHint is True + assert tools[0].annotations.read_only_hint is True class TestStructuredOutput: @@ -794,7 +794,7 @@ async def test_metadata_with_annotations(self): app = FastMCP() metadata = {"custom": "value"} - annotations = ToolAnnotations(title="Combined Tool", readOnlyHint=True) + annotations = ToolAnnotations(title="Combined Tool", read_only_hint=True) @app.tool(meta=metadata, annotations=annotations) def combined_tool(data: str) -> str: # pragma: no cover @@ -806,7 +806,7 @@ def combined_tool(data: str) -> str: # pragma: no cover assert tools[0].meta == metadata assert tools[0].annotations is not None assert tools[0].annotations.title == "Combined Tool" - assert tools[0].annotations.readOnlyHint is True + assert tools[0].annotations.read_only_hint is True class TestRemoveTools: diff --git a/tests/server/fastmcp/test_url_elicitation.py b/tests/server/fastmcp/test_url_elicitation.py index a4d3b2e643..dce16d422d 100644 --- a/tests/server/fastmcp/test_url_elicitation.py +++ b/tests/server/fastmcp/test_url_elicitation.py @@ -32,7 +32,7 @@ async def request_api_key(ctx: Context[ServerSession, None]) -> str: async def elicitation_callback(context: RequestContext[ClientSession, None], params: ElicitRequestParams): assert params.mode == "url" assert params.url == "https://example.com/api_key_setup" - assert params.elicitationId == "test-elicitation-001" + assert params.elicitation_id == "test-elicitation-001" assert params.message == "Please provide your API key to continue." 
return ElicitResult(action="accept") @@ -160,9 +160,9 @@ async def elicitation_callback(context: RequestContext[ClientSession, None], par # Verify that this is URL mode assert params.mode == "url" assert isinstance(params, types.ElicitRequestURLParams) - # URL params have url and elicitationId, not requestedSchema + # URL params have url and elicitation_id, not requested_schema assert params.url == "https://example.com/test" - assert params.elicitationId == "test-001" + assert params.elicitation_id == "test-001" # Return without content - this is correct for URL mode return ElicitResult(action="accept") @@ -199,8 +199,8 @@ async def elicitation_callback(context: RequestContext[ClientSession, None], par # Verify form mode parameters assert params.mode == "form" assert isinstance(params, types.ElicitRequestFormParams) - # Form params have requestedSchema, not url/elicitationId - assert params.requestedSchema is not None + # Form params have requested_schema, not url/elicitation_id + assert params.requested_schema is not None return ElicitResult(action="accept", content={"name": "Alice"}) async with create_connected_server_and_client_session( @@ -341,7 +341,7 @@ async def use_deprecated_elicit(ctx: Context[ServerSession, None]) -> str: # Use the deprecated elicit() method which should call elicit_form() result = await ctx.session.elicit( message="Enter your email", - requestedSchema=EmailSchema.model_json_schema(), + requested_schema=EmailSchema.model_json_schema(), ) if result.action == "accept" and result.content: @@ -351,7 +351,7 @@ async def use_deprecated_elicit(ctx: Context[ServerSession, None]) -> str: async def elicitation_callback(context: RequestContext[ClientSession, None], params: ElicitRequestParams): # Verify this is form mode assert params.mode == "form" - assert params.requestedSchema is not None + assert params.requested_schema is not None return ElicitResult(action="accept", content={"email": "test@example.com"}) async with create_connected_server_and_client_session( @@ -382,7 +382,7 @@ async def direct_elicit_url(ctx: Context[ServerSession, None]) -> str: async def elicitation_callback(context: RequestContext[ClientSession, None], params: ElicitRequestParams): assert params.mode == "url" - assert params.elicitationId == "ctx-test-001" + assert params.elicitation_id == "ctx-test-001" return ElicitResult(action="accept") async with create_connected_server_and_client_session( diff --git a/tests/server/fastmcp/test_url_elicitation_error_throw.py b/tests/server/fastmcp/test_url_elicitation_error_throw.py index 2d7eda4ab4..27effe55b6 100644 --- a/tests/server/fastmcp/test_url_elicitation_error_throw.py +++ b/tests/server/fastmcp/test_url_elicitation_error_throw.py @@ -23,7 +23,7 @@ async def connect_service(service_name: str, ctx: Context[ServerSession, None]) mode="url", message=f"Authorization required to connect to {service_name}", url=f"https://{service_name}.example.com/oauth/authorize", - elicitationId=f"{service_name}-auth-001", + elicitation_id=f"{service_name}-auth-001", ) ] ) @@ -63,13 +63,13 @@ async def multi_auth(ctx: Context[ServerSession, None]) -> str: mode="url", message="GitHub authorization required", url="https://github.example.com/oauth", - elicitationId="github-auth", + elicitation_id="github-auth", ), types.ElicitRequestURLParams( mode="url", message="Google Drive authorization required", url="https://drive.google.com/oauth", - elicitationId="gdrive-auth", + elicitation_id="gdrive-auth", ), ] ) @@ -89,13 +89,13 @@ async def multi_auth(ctx: 
Context[ServerSession, None]) -> str: # Verify the reconstructed error has both elicitations assert len(url_error.elicitations) == 2 - assert url_error.elicitations[0].elicitationId == "github-auth" - assert url_error.elicitations[1].elicitationId == "gdrive-auth" + assert url_error.elicitations[0].elicitation_id == "github-auth" + assert url_error.elicitations[1].elicitation_id == "gdrive-auth" @pytest.mark.anyio async def test_normal_exceptions_still_return_error_result(): - """Test that normal exceptions still return CallToolResult with isError=True.""" + """Test that normal exceptions still return CallToolResult with is_error=True.""" mcp = FastMCP(name="NormalErrorServer") @mcp.tool(description="A tool that raises a normal exception") @@ -107,7 +107,7 @@ async def failing_tool(ctx: Context[ServerSession, None]) -> str: # Normal exceptions should be returned as error results, not McpError result = await client_session.call_tool("failing_tool", {}) - assert result.isError is True + assert result.is_error is True assert len(result.content) == 1 assert isinstance(result.content[0], types.TextContent) assert "Something went wrong" in result.content[0].text diff --git a/tests/server/lowlevel/test_server_listing.py b/tests/server/lowlevel/test_server_listing.py index 60823d967c..38998b4b42 100644 --- a/tests/server/lowlevel/test_server_listing.py +++ b/tests/server/lowlevel/test_server_listing.py @@ -80,7 +80,7 @@ async def test_list_tools_basic() -> None: Tool( name="tool1", description="First tool", - inputSchema={ + input_schema={ "type": "object", "properties": { "message": {"type": "string"}, @@ -91,7 +91,7 @@ async def test_list_tools_basic() -> None: Tool( name="tool2", description="Second tool", - inputSchema={ + input_schema={ "type": "object", "properties": { "count": {"type": "number"}, diff --git a/tests/server/test_cancel_handling.py b/tests/server/test_cancel_handling.py index 47c49bb62b..ef3ef49367 100644 --- a/tests/server/test_cancel_handling.py +++ b/tests/server/test_cancel_handling.py @@ -38,7 +38,7 @@ async def handle_list_tools() -> list[Tool]: Tool( name="test_tool", description="Tool for testing", - inputSchema={}, + input_schema={}, ) ] @@ -83,7 +83,7 @@ async def first_request(): ClientNotification( CancelledNotification( params=CancelledNotificationParams( - requestId=first_request_id, + request_id=first_request_id, reason="Testing server recovery", ), ) diff --git a/tests/server/test_lowlevel_input_validation.py b/tests/server/test_lowlevel_input_validation.py index a4cde97d1d..eb644938ff 100644 --- a/tests/server/test_lowlevel_input_validation.py +++ b/tests/server/test_lowlevel_input_validation.py @@ -103,7 +103,7 @@ def create_add_tool() -> Tool: return Tool( name="add", description="Add two numbers", - inputSchema={ + input_schema={ "type": "object", "properties": { "a": {"type": "number"}, @@ -133,7 +133,7 @@ async def test_callback(client_session: ClientSession) -> CallToolResult: # Verify results assert result is not None - assert not result.isError + assert not result.is_error assert len(result.content) == 1 assert result.content[0].type == "text" assert isinstance(result.content[0], TextContent) @@ -155,7 +155,7 @@ async def test_callback(client_session: ClientSession) -> CallToolResult: # Verify results assert result is not None - assert result.isError + assert result.is_error assert len(result.content) == 1 assert result.content[0].type == "text" assert isinstance(result.content[0], TextContent) @@ -178,7 +178,7 @@ async def 
test_callback(client_session: ClientSession) -> CallToolResult: # Verify results assert result is not None - assert result.isError + assert result.is_error assert len(result.content) == 1 assert result.content[0].type == "text" assert isinstance(result.content[0], TextContent) @@ -193,7 +193,7 @@ async def test_cache_refresh_on_missing_tool(): Tool( name="multiply", description="Multiply two numbers", - inputSchema={ + input_schema={ "type": "object", "properties": { "x": {"type": "number"}, @@ -220,7 +220,7 @@ async def test_callback(client_session: ClientSession) -> CallToolResult: # Verify results - should work because cache will be refreshed assert result is not None - assert not result.isError + assert not result.is_error assert len(result.content) == 1 assert result.content[0].type == "text" assert isinstance(result.content[0], TextContent) @@ -234,7 +234,7 @@ async def test_enum_constraint_validation(): Tool( name="greet", description="Greet someone", - inputSchema={ + input_schema={ "type": "object", "properties": { "name": {"type": "string"}, @@ -256,7 +256,7 @@ async def test_callback(client_session: ClientSession) -> CallToolResult: # Verify results assert result is not None - assert result.isError + assert result.is_error assert len(result.content) == 1 assert result.content[0].type == "text" assert isinstance(result.content[0], TextContent) @@ -271,7 +271,7 @@ async def test_tool_not_in_list_logs_warning(caplog: pytest.LogCaptureFixture): Tool( name="add", description="Add two numbers", - inputSchema={ + input_schema={ "type": "object", "properties": { "a": {"type": "number"}, @@ -300,7 +300,7 @@ async def test_callback(client_session: ClientSession) -> CallToolResult: # Verify results - should succeed because validation is skipped for unknown tools assert result is not None - assert not result.isError + assert not result.is_error assert len(result.content) == 1 assert result.content[0].type == "text" assert isinstance(result.content[0], TextContent) diff --git a/tests/server/test_lowlevel_output_validation.py b/tests/server/test_lowlevel_output_validation.py index e1a6040e02..3b1b7236b5 100644 --- a/tests/server/test_lowlevel_output_validation.py +++ b/tests/server/test_lowlevel_output_validation.py @@ -106,7 +106,7 @@ async def test_content_only_without_output_schema(): Tool( name="echo", description="Echo a message", - inputSchema={ + input_schema={ "type": "object", "properties": { "message": {"type": "string"}, @@ -130,12 +130,12 @@ async def test_callback(client_session: ClientSession) -> CallToolResult: # Verify results assert result is not None - assert not result.isError + assert not result.is_error assert len(result.content) == 1 assert result.content[0].type == "text" assert isinstance(result.content[0], TextContent) assert result.content[0].text == "Echo: Hello" - assert result.structuredContent is None + assert result.structured_content is None @pytest.mark.anyio @@ -145,7 +145,7 @@ async def test_dict_only_without_output_schema(): Tool( name="get_info", description="Get structured information", - inputSchema={ + input_schema={ "type": "object", "properties": {}, }, @@ -166,13 +166,13 @@ async def test_callback(client_session: ClientSession) -> CallToolResult: # Verify results assert result is not None - assert not result.isError + assert not result.is_error assert len(result.content) == 1 assert result.content[0].type == "text" assert isinstance(result.content[0], TextContent) # Check that the content is the JSON serialization assert 
json.loads(result.content[0].text) == {"status": "ok", "data": {"value": 42}} - assert result.structuredContent == {"status": "ok", "data": {"value": 42}} + assert result.structured_content == {"status": "ok", "data": {"value": 42}} @pytest.mark.anyio @@ -182,7 +182,7 @@ async def test_both_content_and_dict_without_output_schema(): Tool( name="process", description="Process data", - inputSchema={ + input_schema={ "type": "object", "properties": {}, }, @@ -205,12 +205,12 @@ async def test_callback(client_session: ClientSession) -> CallToolResult: # Verify results assert result is not None - assert not result.isError + assert not result.is_error assert len(result.content) == 1 assert result.content[0].type == "text" assert isinstance(result.content[0], TextContent) assert result.content[0].text == "Processing complete" - assert result.structuredContent == {"result": "success", "count": 10} + assert result.structured_content == {"result": "success", "count": 10} @pytest.mark.anyio @@ -220,11 +220,11 @@ async def test_content_only_with_output_schema_error(): Tool( name="structured_tool", description="Tool expecting structured output", - inputSchema={ + input_schema={ "type": "object", "properties": {}, }, - outputSchema={ + output_schema={ "type": "object", "properties": { "result": {"type": "string"}, @@ -245,7 +245,7 @@ async def test_callback(client_session: ClientSession) -> CallToolResult: # Verify error assert result is not None - assert result.isError + assert result.is_error assert len(result.content) == 1 assert result.content[0].type == "text" assert isinstance(result.content[0], TextContent) @@ -259,7 +259,7 @@ async def test_valid_dict_with_output_schema(): Tool( name="calc", description="Calculate result", - inputSchema={ + input_schema={ "type": "object", "properties": { "x": {"type": "number"}, @@ -267,7 +267,7 @@ async def test_valid_dict_with_output_schema(): }, "required": ["x", "y"], }, - outputSchema={ + output_schema={ "type": "object", "properties": { "sum": {"type": "number"}, @@ -293,12 +293,12 @@ async def test_callback(client_session: ClientSession) -> CallToolResult: # Verify results assert result is not None - assert not result.isError + assert not result.is_error assert len(result.content) == 1 assert result.content[0].type == "text" # Check JSON serialization assert json.loads(result.content[0].text) == {"sum": 7, "product": 12} - assert result.structuredContent == {"sum": 7, "product": 12} + assert result.structured_content == {"sum": 7, "product": 12} @pytest.mark.anyio @@ -308,11 +308,11 @@ async def test_invalid_dict_with_output_schema(): Tool( name="user_info", description="Get user information", - inputSchema={ + input_schema={ "type": "object", "properties": {}, }, - outputSchema={ + output_schema={ "type": "object", "properties": { "name": {"type": "string"}, @@ -337,7 +337,7 @@ async def test_callback(client_session: ClientSession) -> CallToolResult: # Verify error assert result is not None - assert result.isError + assert result.is_error assert len(result.content) == 1 assert result.content[0].type == "text" assert isinstance(result.content[0], TextContent) @@ -352,14 +352,14 @@ async def test_both_content_and_valid_dict_with_output_schema(): Tool( name="analyze", description="Analyze data", - inputSchema={ + input_schema={ "type": "object", "properties": { "text": {"type": "string"}, }, "required": ["text"], }, - outputSchema={ + output_schema={ "type": "object", "properties": { "sentiment": {"type": "string", "enum": ["positive", "negative", 
"neutral"]}, @@ -385,11 +385,11 @@ async def test_callback(client_session: ClientSession) -> CallToolResult: # Verify results assert result is not None - assert not result.isError + assert not result.is_error assert len(result.content) == 1 assert result.content[0].type == "text" assert result.content[0].text == "Analysis of: Great job!" - assert result.structuredContent == {"sentiment": "positive", "confidence": 0.95} + assert result.structured_content == {"sentiment": "positive", "confidence": 0.95} @pytest.mark.anyio @@ -399,7 +399,7 @@ async def test_tool_call_result(): Tool( name="get_info", description="Get structured information", - inputSchema={ + input_schema={ "type": "object", "properties": {}, }, @@ -411,7 +411,7 @@ async def call_tool_handler(name: str, arguments: dict[str, Any]) -> CallToolRes if name == "get_info": return CallToolResult( content=[TextContent(type="text", text="Results calculated")], - structuredContent={"status": "ok", "data": {"value": 42}}, + structured_content={"status": "ok", "data": {"value": 42}}, _meta={"some": "metadata"}, ) else: # pragma: no cover @@ -424,12 +424,12 @@ async def test_callback(client_session: ClientSession) -> CallToolResult: # Verify results assert result is not None - assert not result.isError + assert not result.is_error assert len(result.content) == 1 assert result.content[0].type == "text" assert result.content[0].text == "Results calculated" assert isinstance(result.content[0], TextContent) - assert result.structuredContent == {"status": "ok", "data": {"value": 42}} + assert result.structured_content == {"status": "ok", "data": {"value": 42}} assert result.meta == {"some": "metadata"} @@ -440,11 +440,11 @@ async def test_output_schema_type_validation(): Tool( name="stats", description="Get statistics", - inputSchema={ + input_schema={ "type": "object", "properties": {}, }, - outputSchema={ + output_schema={ "type": "object", "properties": { "count": {"type": "integer"}, @@ -470,7 +470,7 @@ async def test_callback(client_session: ClientSession) -> CallToolResult: # Verify error assert result is not None - assert result.isError + assert result.is_error assert len(result.content) == 1 assert result.content[0].type == "text" assert "Output validation error:" in result.content[0].text diff --git a/tests/server/test_lowlevel_tool_annotations.py b/tests/server/test_lowlevel_tool_annotations.py index 3f852d27a8..614ca2dce5 100644 --- a/tests/server/test_lowlevel_tool_annotations.py +++ b/tests/server/test_lowlevel_tool_annotations.py @@ -25,7 +25,7 @@ async def list_tools(): # pragma: no cover Tool( name="echo", description="Echo a message back", - inputSchema={ + input_schema={ "type": "object", "properties": { "message": {"type": "string"}, @@ -34,7 +34,7 @@ async def list_tools(): # pragma: no cover }, annotations=ToolAnnotations( title="Echo Tool", - readOnlyHint=True, + read_only_hint=True, ), ) ] @@ -98,4 +98,4 @@ async def handle_messages(): assert tools_result.tools[0].name == "echo" assert tools_result.tools[0].annotations is not None assert tools_result.tools[0].annotations.title == "Echo Tool" - assert tools_result.tools[0].annotations.readOnlyHint is True + assert tools_result.tools[0].annotations.read_only_hint is True diff --git a/tests/server/test_read_resource.py b/tests/server/test_read_resource.py index 75a1f19935..0f62fe235a 100644 --- a/tests/server/test_read_resource.py +++ b/tests/server/test_read_resource.py @@ -45,7 +45,7 @@ async def read_resource(uri: str) -> Iterable[ReadResourceContents]: content = 
result.root.contents[0] assert isinstance(content, types.TextResourceContents) assert content.text == "Hello World" - assert content.mimeType == "text/plain" + assert content.mime_type == "text/plain" @pytest.mark.anyio @@ -71,7 +71,7 @@ async def read_resource(uri: str) -> Iterable[ReadResourceContents]: content = result.root.contents[0] assert isinstance(content, types.BlobResourceContents) - assert content.mimeType == "application/octet-stream" + assert content.mime_type == "application/octet-stream" @pytest.mark.anyio @@ -103,4 +103,4 @@ async def read_resource(uri: str) -> Iterable[ReadResourceContents]: content = result.root.contents[0] assert isinstance(content, types.TextResourceContents) assert content.text == "Hello World" - assert content.mimeType == "text/plain" + assert content.mime_type == "text/plain" diff --git a/tests/server/test_session.py b/tests/server/test_session.py index f9652f49b8..ced1d92ff7 100644 --- a/tests/server/test_session.py +++ b/tests/server/test_session.py @@ -101,7 +101,7 @@ async def list_prompts() -> list[Prompt]: # pragma: no cover return [] caps = server.get_capabilities(notification_options, experimental_capabilities) - assert caps.prompts == PromptsCapability(listChanged=False) + assert caps.prompts == PromptsCapability(list_changed=False) assert caps.resources is None assert caps.completions is None @@ -111,8 +111,8 @@ async def list_resources() -> list[Resource]: # pragma: no cover return [] caps = server.get_capabilities(notification_options, experimental_capabilities) - assert caps.prompts == PromptsCapability(listChanged=False) - assert caps.resources == ResourcesCapability(subscribe=False, listChanged=False) + assert caps.prompts == PromptsCapability(list_changed=False) + assert caps.resources == ResourcesCapability(subscribe=False, list_changed=False) assert caps.completions is None # Add a complete handler @@ -127,8 +127,8 @@ async def complete( # pragma: no cover ) caps = server.get_capabilities(notification_options, experimental_capabilities) - assert caps.prompts == PromptsCapability(listChanged=False) - assert caps.resources == ResourcesCapability(subscribe=False, listChanged=False) + assert caps.prompts == PromptsCapability(list_changed=False) + assert caps.resources == ResourcesCapability(subscribe=False, list_changed=False) assert caps.completions == CompletionsCapability() @@ -175,9 +175,9 @@ async def mock_client(): id=1, method="initialize", params=types.InitializeRequestParams( - protocolVersion="2024-11-05", + protocol_version="2024-11-05", capabilities=types.ClientCapabilities(), - clientInfo=types.Implementation(name="test-client", version="1.0.0"), + client_info=types.Implementation(name="test-client", version="1.0.0"), ).model_dump(by_alias=True, mode="json", exclude_none=True), ) ) @@ -191,7 +191,7 @@ async def mock_client(): init_result = types.InitializeResult.model_validate(result_data) # Check that the server responded with the requested protocol version - received_protocol_version = init_result.protocolVersion + received_protocol_version = init_result.protocol_version assert received_protocol_version == "2024-11-05" # Send initialized notification @@ -312,17 +312,17 @@ async def test_create_message_tool_result_validation(): ) as session: # Set up client params with sampling.tools capability for the test session._client_params = types.InitializeRequestParams( - protocolVersion=types.LATEST_PROTOCOL_VERSION, + protocol_version=types.LATEST_PROTOCOL_VERSION, capabilities=types.ClientCapabilities( 
sampling=types.SamplingCapability(tools=types.SamplingToolsCapability()) ), - clientInfo=types.Implementation(name="test", version="1.0"), + client_info=types.Implementation(name="test", version="1.0"), ) - tool = types.Tool(name="test_tool", inputSchema={"type": "object"}) + tool = types.Tool(name="test_tool", input_schema={"type": "object"}) text = types.TextContent(type="text", text="hello") tool_use = types.ToolUseContent(type="tool_use", id="call_1", name="test_tool", input={}) - tool_result = types.ToolResultContent(type="tool_result", toolUseId="call_1", content=[]) + tool_result = types.ToolResultContent(type="tool_result", tool_use_id="call_1", content=[]) # Case 1: tool_result mixed with other content with pytest.raises(ValueError, match="only tool_result content"): @@ -363,7 +363,7 @@ async def test_create_message_tool_result_validation(): types.SamplingMessage(role="assistant", content=tool_use), types.SamplingMessage( role="user", - content=types.ToolResultContent(type="tool_result", toolUseId="wrong_id", content=[]), + content=types.ToolResultContent(type="tool_result", tool_use_id="wrong_id", content=[]), ), ], max_tokens=100, @@ -438,12 +438,12 @@ async def test_create_message_without_tools_capability(): ) as session: # Set up client params WITHOUT sampling.tools capability session._client_params = types.InitializeRequestParams( - protocolVersion=types.LATEST_PROTOCOL_VERSION, + protocol_version=types.LATEST_PROTOCOL_VERSION, capabilities=types.ClientCapabilities(sampling=types.SamplingCapability()), - clientInfo=types.Implementation(name="test", version="1.0"), + client_info=types.Implementation(name="test", version="1.0"), ) - tool = types.Tool(name="test_tool", inputSchema={"type": "object"}) + tool = types.Tool(name="test_tool", input_schema={"type": "object"}) text = types.TextContent(type="text", text="hello") # Should raise McpError when tools are provided but client lacks capability diff --git a/tests/server/test_session_race_condition.py b/tests/server/test_session_race_condition.py index b5388167ad..42c5578b02 100644 --- a/tests/server/test_session_race_condition.py +++ b/tests/server/test_session_race_condition.py @@ -49,7 +49,7 @@ async def run_server(): server_name="test-server", server_version="1.0.0", capabilities=ServerCapabilities( - tools=types.ToolsCapability(listChanged=False), + tools=types.ToolsCapability(list_changed=False), ), ), ) as server_session: @@ -70,7 +70,7 @@ async def run_server(): Tool( name="example_tool", description="An example tool", - inputSchema={"type": "object", "properties": {}}, + input_schema={"type": "object", "properties": {}}, ) ] ) @@ -95,9 +95,9 @@ async def mock_client(): id=1, method="initialize", params=types.InitializeRequestParams( - protocolVersion=types.LATEST_PROTOCOL_VERSION, + protocol_version=types.LATEST_PROTOCOL_VERSION, capabilities=types.ClientCapabilities(), - clientInfo=types.Implementation(name="test-client", version="1.0.0"), + client_info=types.Implementation(name="test-client", version="1.0.0"), ).model_dump(by_alias=True, mode="json", exclude_none=True), ) ) diff --git a/tests/server/test_stateless_mode.py b/tests/server/test_stateless_mode.py index c59ea23511..2a40d60982 100644 --- a/tests/server/test_stateless_mode.py +++ b/tests/server/test_stateless_mode.py @@ -76,7 +76,7 @@ async def test_elicit_form_fails_in_stateless_mode(stateless_session: ServerSess with pytest.raises(StatelessModeNotSupported, match="elicitation"): await stateless_session.elicit_form( message="Please provide input", - 
requestedSchema={"type": "object", "properties": {}}, + requested_schema={"type": "object", "properties": {}}, ) @@ -97,7 +97,7 @@ async def test_elicit_deprecated_fails_in_stateless_mode(stateless_session: Serv with pytest.raises(StatelessModeNotSupported, match="elicitation"): await stateless_session.elicit( message="Please provide input", - requestedSchema={"type": "object", "properties": {}}, + requested_schema={"type": "object", "properties": {}}, ) diff --git a/tests/server/test_validation.py b/tests/server/test_validation.py index 56044460df..11c61d93b7 100644 --- a/tests/server/test_validation.py +++ b/tests/server/test_validation.py @@ -53,7 +53,7 @@ def test_no_error_when_tools_none(self) -> None: def test_raises_when_tools_provided_but_no_capability(self) -> None: """Raises McpError when tools provided but client doesn't support.""" - tool = Tool(name="test", inputSchema={"type": "object"}) + tool = Tool(name="test", input_schema={"type": "object"}) with pytest.raises(McpError) as exc_info: validate_sampling_tools(None, [tool], None) assert "sampling tools capability" in str(exc_info.value) @@ -67,7 +67,7 @@ def test_raises_when_tool_choice_provided_but_no_capability(self) -> None: def test_no_error_when_capability_present(self) -> None: """No error when client has sampling.tools capability.""" caps = ClientCapabilities(sampling=SamplingCapability(tools=SamplingToolsCapability())) - tool = Tool(name="test", inputSchema={"type": "object"}) + tool = Tool(name="test", input_schema={"type": "object"}) validate_sampling_tools(caps, [tool], ToolChoice(mode="auto")) # Should not raise @@ -92,7 +92,7 @@ def test_raises_when_tool_result_mixed_with_other_content(self) -> None: SamplingMessage( role="user", content=[ - ToolResultContent(type="tool_result", toolUseId="123"), + ToolResultContent(type="tool_result", tool_use_id="123"), TextContent(type="text", text="also this"), ], ), @@ -105,7 +105,7 @@ def test_raises_when_tool_result_without_previous_tool_use(self) -> None: messages = [ SamplingMessage( role="user", - content=ToolResultContent(type="tool_result", toolUseId="123"), + content=ToolResultContent(type="tool_result", tool_use_id="123"), ), ] with pytest.raises(ValueError, match="previous message containing tool_use"): @@ -120,7 +120,7 @@ def test_raises_when_tool_result_ids_dont_match_tool_use(self) -> None: ), SamplingMessage( role="user", - content=ToolResultContent(type="tool_result", toolUseId="tool-2"), + content=ToolResultContent(type="tool_result", tool_use_id="tool-2"), ), ] with pytest.raises(ValueError, match="do not match"): @@ -135,7 +135,7 @@ def test_no_error_when_tool_result_matches_tool_use(self) -> None: ), SamplingMessage( role="user", - content=ToolResultContent(type="tool_result", toolUseId="tool-1"), + content=ToolResultContent(type="tool_result", tool_use_id="tool-1"), ), ] validate_tool_use_result_messages(messages) # Should not raise diff --git a/tests/shared/test_exceptions.py b/tests/shared/test_exceptions.py index 8845dfe781..1a42e7aefe 100644 --- a/tests/shared/test_exceptions.py +++ b/tests/shared/test_exceptions.py @@ -15,14 +15,14 @@ def test_create_with_single_elicitation(self) -> None: mode="url", message="Auth required", url="https://example.com/auth", - elicitationId="test-123", + elicitation_id="test-123", ) error = UrlElicitationRequiredError([elicitation]) assert error.error.code == URL_ELICITATION_REQUIRED assert error.error.message == "URL elicitation required" assert len(error.elicitations) == 1 - assert 
error.elicitations[0].elicitationId == "test-123" + assert error.elicitations[0].elicitation_id == "test-123" def test_create_with_multiple_elicitations(self) -> None: """Test creating error with multiple elicitations uses plural message.""" @@ -31,13 +31,13 @@ def test_create_with_multiple_elicitations(self) -> None: mode="url", message="Auth 1", url="https://example.com/auth1", - elicitationId="test-1", + elicitation_id="test-1", ), ElicitRequestURLParams( mode="url", message="Auth 2", url="https://example.com/auth2", - elicitationId="test-2", + elicitation_id="test-2", ), ] error = UrlElicitationRequiredError(elicitations) @@ -51,7 +51,7 @@ def test_custom_message(self) -> None: mode="url", message="Auth required", url="https://example.com/auth", - elicitationId="test-123", + elicitation_id="test-123", ) error = UrlElicitationRequiredError([elicitation], message="Custom message") @@ -77,7 +77,7 @@ def test_from_error_data(self) -> None: error = UrlElicitationRequiredError.from_error(error_data) assert len(error.elicitations) == 1 - assert error.elicitations[0].elicitationId == "test-123" + assert error.elicitations[0].elicitation_id == "test-123" assert error.elicitations[0].url == "https://example.com/auth" def test_from_error_data_wrong_code(self) -> None: @@ -99,7 +99,7 @@ def test_serialization_roundtrip(self) -> None: mode="url", message="Auth required", url="https://example.com/auth", - elicitationId="test-123", + elicitation_id="test-123", ) ] ) @@ -110,7 +110,7 @@ def test_serialization_roundtrip(self) -> None: # Reconstruct reconstructed = UrlElicitationRequiredError.from_error(error_data) - assert reconstructed.elicitations[0].elicitationId == original.elicitations[0].elicitationId + assert reconstructed.elicitations[0].elicitation_id == original.elicitations[0].elicitation_id assert reconstructed.elicitations[0].url == original.elicitations[0].url assert reconstructed.elicitations[0].message == original.elicitations[0].message @@ -120,7 +120,7 @@ def test_error_data_contains_elicitations(self) -> None: mode="url", message="Please authenticate", url="https://example.com/oauth", - elicitationId="oauth-flow-1", + elicitation_id="oauth-flow-1", ) error = UrlElicitationRequiredError([elicitation]) @@ -138,7 +138,7 @@ def test_inherits_from_mcp_error(self) -> None: mode="url", message="Auth required", url="https://example.com/auth", - elicitationId="test-123", + elicitation_id="test-123", ) error = UrlElicitationRequiredError([elicitation]) @@ -151,7 +151,7 @@ def test_exception_message(self) -> None: mode="url", message="Auth required", url="https://example.com/auth", - elicitationId="test-123", + elicitation_id="test-123", ) error = UrlElicitationRequiredError([elicitation]) diff --git a/tests/shared/test_progress_notifications.py b/tests/shared/test_progress_notifications.py index 1552711d2e..5f0ac83fdc 100644 --- a/tests/shared/test_progress_notifications.py +++ b/tests/shared/test_progress_notifications.py @@ -79,7 +79,7 @@ async def handle_list_tools() -> list[types.Tool]: types.Tool( name="test_tool", description="A tool that sends progress notifications", - inputSchema={}, + input_schema={}, ) ] @@ ... @@ async def handle_list_tools() -> list[types.Tool]: types.Tool( name="progress_tool", description="A tool that sends progress notifications", - inputSchema={}, + input_schema={}, ) ] diff --git a/tests/shared/test_session.py b/tests/shared/test_session.py index c138e8428a..13ebf0e704 100644 --- a/tests/shared/test_session.py +++ b/tests/shared/test_session.py @@ -82,7 +82,7 @@ async def handle_list_tools() -> list[types.Tool]: types.Tool( name="slow_tool", 
description="A slow tool that takes 10 seconds to complete", - inputSchema={}, + input_schema={}, ) ] diff --git a/tests/shared/test_sse.py b/tests/shared/test_sse.py index 99d84515ef..ad198e627b 100644 --- a/tests/shared/test_sse.py +++ b/tests/shared/test_sse.py @@ -78,7 +78,7 @@ async def handle_list_tools() -> list[Tool]: Tool( name="test_tool", description="A test tool", - inputSchema={"type": "object", "properties": {}}, + input_schema={"type": "object", "properties": {}}, ) ] @@ -184,7 +184,7 @@ async def test_sse_client_basic_connection(server: None, server_url: str) -> Non # Test initialization result = await session.initialize() assert isinstance(result, InitializeResult) - assert result.serverInfo.name == SERVER_NAME + assert result.server_info.name == SERVER_NAME # Test ping ping_result = await session.send_ping() @@ -330,7 +330,7 @@ async def test_sse_client_basic_connection_mounted_app(mounted_server: None, ser # Test initialization result = await session.initialize() assert isinstance(result, InitializeResult) - assert result.serverInfo.name == SERVER_NAME + assert result.server_info.name == SERVER_NAME # Test ping ping_result = await session.send_ping() @@ -366,12 +366,12 @@ async def handle_list_tools() -> list[Tool]: Tool( name="echo_headers", description="Echoes request headers", - inputSchema={"type": "object", "properties": {}}, + input_schema={"type": "object", "properties": {}}, ), Tool( name="echo_context", description="Echoes request context", - inputSchema={ + input_schema={ "type": "object", "properties": {"request_id": {"type": "string"}}, "required": ["request_id"], @@ -558,9 +558,9 @@ async def test_sse_client_handles_empty_keepalive_pings() -> None: """ # Build a proper JSON-RPC response using types (not hardcoded strings) init_result = InitializeResult( - protocolVersion="2024-11-05", + protocol_version="2024-11-05", capabilities=ServerCapabilities(), - serverInfo=Implementation(name="test", version="1.0"), + server_info=Implementation(name="test", version="1.0"), ) response = JSONRPCResponse( jsonrpc="2.0", diff --git a/tests/shared/test_streamable_http.py b/tests/shared/test_streamable_http.py index 795bd9705e..09cf54bceb 100644 --- a/tests/shared/test_streamable_http.py +++ b/tests/shared/test_streamable_http.py @@ -154,47 +154,47 @@ async def handle_list_tools() -> list[Tool]: Tool( name="test_tool", description="A test tool", - inputSchema={"type": "object", "properties": {}}, + input_schema={"type": "object", "properties": {}}, ), Tool( name="test_tool_with_standalone_notification", description="A test tool that sends a notification", - inputSchema={"type": "object", "properties": {}}, + input_schema={"type": "object", "properties": {}}, ), Tool( name="long_running_with_checkpoints", description="A long-running tool that sends periodic notifications", - inputSchema={"type": "object", "properties": {}}, + input_schema={"type": "object", "properties": {}}, ), Tool( name="test_sampling_tool", description="A tool that triggers server-side sampling", - inputSchema={"type": "object", "properties": {}}, + input_schema={"type": "object", "properties": {}}, ), Tool( name="wait_for_lock_with_notification", description="A tool that sends a notification and waits for lock", - inputSchema={"type": "object", "properties": {}}, + input_schema={"type": "object", "properties": {}}, ), Tool( name="release_lock", description="A tool that releases the lock", - inputSchema={"type": "object", "properties": {}}, + input_schema={"type": "object", "properties": {}}, ), Tool( 
name="tool_with_stream_close", description="A tool that closes SSE stream mid-operation", - inputSchema={"type": "object", "properties": {}}, + input_schema={"type": "object", "properties": {}}, ), Tool( name="tool_with_multiple_notifications_and_close", description="Tool that sends notification1, closes stream, sends notification2, notification3", - inputSchema={"type": "object", "properties": {}}, + input_schema={"type": "object", "properties": {}}, ), Tool( name="tool_with_multiple_stream_closes", description="Tool that closes SSE stream multiple times during execution", - inputSchema={ + input_schema={ "type": "object", "properties": { "checkpoints": {"type": "integer", "default": 3}, @@ -205,7 +205,7 @@ async def handle_list_tools() -> list[Tool]: Tool( name="tool_with_standalone_stream_close", description="Tool that closes standalone GET stream mid-operation", - inputSchema={"type": "object", "properties": {}}, + input_schema={"type": "object", "properties": {}}, ), ] @@ -1004,7 +1004,7 @@ async def test_streamable_http_client_basic_connection(basic_server: None, basic # Test initialization result = await session.initialize() assert isinstance(result, InitializeResult) - assert result.serverInfo.name == SERVER_NAME + assert result.server_info.name == SERVER_NAME @pytest.mark.anyio @@ -1084,7 +1084,7 @@ async def test_streamable_http_client_json_response(json_response_server: None, # Initialize the session result = await session.initialize() assert isinstance(result, InitializeResult) - assert result.serverInfo.name == SERVER_NAME + assert result.server_info.name == SERVER_NAME # Check tool listing tools = await session.list_tools() @@ -1286,7 +1286,7 @@ async def on_resumption_token_update(token: str) -> None: captured_session_id = get_session_id() assert captured_session_id is not None # Capture the negotiated protocol version - captured_protocol_version = result.protocolVersion + captured_protocol_version = result.protocol_version # Start the tool that will wait on lock in a task async with anyio.create_task_group() as tg: @@ -1395,7 +1395,7 @@ async def sampling_callback( text=f"Received message from server: {message_received}", ), model="test-model", - stopReason="endTurn", + stop_reason="endTurn", ) # Create client with sampling callback @@ -1439,12 +1439,12 @@ async def handle_list_tools() -> list[Tool]: Tool( name="echo_headers", description="Echo request headers from context", - inputSchema={"type": "object", "properties": {}}, + input_schema={"type": "object", "properties": {}}, ), Tool( name="echo_context", description="Echo request context with custom data", - inputSchema={ + input_schema={ "type": "object", "properties": { "request_id": {"type": "string"}, @@ -1553,7 +1553,7 @@ async def test_streamablehttp_request_context_propagation(context_aware_server: async with ClientSession(read_stream, write_stream) as session: # pragma: no branch result = await session.initialize() assert isinstance(result, InitializeResult) - assert result.serverInfo.name == "ContextAwareServer" + assert result.server_info.name == "ContextAwareServer" # Call the tool that echoes headers back tool_result = await session.call_tool("echo_headers", {}) @@ -1619,7 +1619,7 @@ async def test_client_includes_protocol_version_header_after_init(context_aware_ async with ClientSession(read_stream, write_stream) as session: # Initialize and get the negotiated version init_result = await session.initialize() - negotiated_version = init_result.protocolVersion + negotiated_version = 
init_result.protocol_version # Call a tool that echoes headers to verify the header is present tool_result = await session.call_tool("echo_headers", {}) diff --git a/tests/shared/test_ws.py b/tests/shared/test_ws.py index e24063ffc9..495ed9954a 100644 --- a/tests/shared/test_ws.py +++ b/tests/shared/test_ws.py @@ -68,7 +68,7 @@ async def handle_list_tools() -> list[Tool]: Tool( name="test_tool", description="A test tool", - inputSchema={"type": "object", "properties": {}}, + input_schema={"type": "object", "properties": {}}, ) ] @@ -135,7 +135,7 @@ async def initialized_ws_client_session(server: None, server_url: str) -> AsyncG # Test initialization result = await session.initialize() assert isinstance(result, InitializeResult) - assert result.serverInfo.name == SERVER_NAME + assert result.server_info.name == SERVER_NAME # Test ping ping_result = await session.send_ping() @@ -153,7 +153,7 @@ async def test_ws_client_basic_connection(server: None, server_url: str) -> None # Test initialization result = await session.initialize() assert isinstance(result, InitializeResult) - assert result.serverInfo.name == SERVER_NAME + assert result.server_info.name == SERVER_NAME # Test ping ping_result = await session.send_ping() diff --git a/tests/test_examples.py b/tests/test_examples.py index 6f5464e394..41859c493a 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -55,8 +55,8 @@ async def test_direct_call_tool_result_return(): content = result.content[0] assert isinstance(content, TextContent) assert content.text == "hello" - assert result.structuredContent - assert result.structuredContent["text"] == "hello" + assert result.structured_content + assert result.structured_content["text"] == "hello" assert isinstance(result.meta, dict) assert result.meta["some"] == "metadata" diff --git a/tests/test_types.py b/tests/test_types.py index 1c16c3cc6e..5dff962b5f 100644 --- a/tests/test_types.py +++ b/tests/test_types.py @@ -57,9 +57,9 @@ async def test_method_initialization(): """ initialize_request = InitializeRequest( params=InitializeRequestParams( - protocolVersion=LATEST_PROTOCOL_VERSION, + protocol_version=LATEST_PROTOCOL_VERSION, capabilities=ClientCapabilities(), - clientInfo=Implementation( + client_info=Implementation( name="mcp", version="0.1.0", ), @@ -68,7 +68,7 @@ async def test_method_initialization(): assert initialize_request.method == "initialize", "method should be set to 'initialize'" assert initialize_request.params is not None - assert initialize_request.params.protocolVersion == LATEST_PROTOCOL_VERSION + assert initialize_request.params.protocol_version == LATEST_PROTOCOL_VERSION @pytest.mark.anyio @@ -105,9 +105,9 @@ async def test_tool_result_content(): tool_result = ToolResultContent.model_validate(tool_result_data) assert tool_result.type == "tool_result" - assert tool_result.toolUseId == "call_abc123" + assert tool_result.tool_use_id == "call_abc123" assert len(tool_result.content) == 1 - assert tool_result.isError is False + assert tool_result.is_error is False # Test with empty content (should default to []) minimal_result_data = {"type": "tool_result", "toolUseId": "call_xyz"} @@ -221,21 +221,21 @@ async def test_create_message_request_params_with_tools(): tool = Tool( name="get_weather", description="Get weather information", - inputSchema={"type": "object", "properties": {"location": {"type": "string"}}}, + input_schema={"type": "object", "properties": {"location": {"type": "string"}}}, ) params = CreateMessageRequestParams( 
messages=[SamplingMessage(role="user", content=TextContent(type="text", text="What's the weather?"))], - maxTokens=1000, + max_tokens=1000, tools=[tool], - toolChoice=ToolChoice(mode="auto"), + tool_choice=ToolChoice(mode="auto"), ) assert params.tools is not None assert len(params.tools) == 1 assert params.tools[0].name == "get_weather" - assert params.toolChoice is not None - assert params.toolChoice.mode == "auto" + assert params.tool_choice is not None + assert params.tool_choice.mode == "auto" @pytest.mark.anyio @@ -252,7 +252,7 @@ async def test_create_message_result_with_tool_use(): result = CreateMessageResultWithTools.model_validate(result_data) assert result.role == "assistant" assert isinstance(result.content, ToolUseContent) - assert result.stopReason == "toolUse" + assert result.stop_reason == "toolUse" assert result.model == "claude-3" # Test content_as_list with single content (covers else branch) @@ -276,7 +276,7 @@ async def test_create_message_result_basic(): assert result.role == "assistant" assert isinstance(result.content, TextContent) assert result.content.text == "Hello!" - assert result.stopReason == "endTurn" + assert result.stop_reason == "endTurn" assert result.model == "claude-3" @@ -322,13 +322,13 @@ def test_tool_preserves_json_schema_2020_12_fields(): "additionalProperties": False, } - tool = Tool(name="test_tool", description="A test tool", inputSchema=input_schema) + tool = Tool(name="test_tool", description="A test tool", input_schema=input_schema) # Verify fields are preserved in the model - assert tool.inputSchema["$schema"] == "https://json-schema.org/draft/2020-12/schema" - assert "$defs" in tool.inputSchema - assert "address" in tool.inputSchema["$defs"] - assert tool.inputSchema["additionalProperties"] is False + assert tool.input_schema["$schema"] == "https://json-schema.org/draft/2020-12/schema" + assert "$defs" in tool.input_schema + assert "address" in tool.input_schema["$defs"] + assert tool.input_schema["additionalProperties"] is False # Verify fields survive serialization round-trip serialized = tool.model_dump(mode="json", by_alias=True) @@ -358,6 +358,6 @@ def test_list_tools_result_preserves_json_schema_2020_12_fields(): result = ListToolsResult.model_validate(raw_response) tool = result.tools[0] - assert tool.inputSchema["$schema"] == "https://json-schema.org/draft/2020-12/schema" - assert "$defs" in tool.inputSchema - assert tool.inputSchema["additionalProperties"] is False + assert tool.input_schema["$schema"] == "https://json-schema.org/draft/2020-12/schema" + assert "$defs" in tool.input_schema + assert tool.input_schema["additionalProperties"] is False From 2b03860a61e4ca7c49640a272e3f1f23501d7ca0 Mon Sep 17 00:00:00 2001 From: Marcelo Trylesinski Date: Fri, 16 Jan 2026 15:13:16 +0100 Subject: [PATCH 2/3] format --- tests/experimental/tasks/server/test_server.py | 4 +++- tests/server/fastmcp/test_server.py | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/experimental/tasks/server/test_server.py b/tests/experimental/tasks/server/test_server.py index 49810df12d..94b37e6d07 100644 --- a/tests/experimental/tasks/server/test_server.py +++ b/tests/experimental/tasks/server/test_server.py @@ -539,7 +539,9 @@ async def run_server() -> None: # Test get_task_result (default handler) payload_result = await client_session.send_request( - ClientRequest(GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id=completed_task.task_id))), + ClientRequest( + 
GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id=completed_task.task_id)) + ), GetTaskPayloadResult, ) # The result should have the related-task metadata diff --git a/tests/server/fastmcp/test_server.py b/tests/server/fastmcp/test_server.py index f271bcd713..5bca04e0cf 100644 --- a/tests/server/fastmcp/test_server.py +++ b/tests/server/fastmcp/test_server.py @@ -558,7 +558,8 @@ def get_metadata() -> dict[str, Any]: assert tool.output_schema["type"] == "object" # dict[str, Any] should have minimal schema assert ( - "additionalProperties" not in tool.output_schema or tool.output_schema.get("additionalProperties") is True + "additionalProperties" not in tool.output_schema + or tool.output_schema.get("additionalProperties") is True ) # Call tool From e649acd67782ab2a140c9ee220d052b3ccdd24f5 Mon Sep 17 00:00:00 2001 From: Marcelo Trylesinski Date: Fri, 16 Jan 2026 15:21:23 +0100 Subject: [PATCH 3/3] refactor: use snake case instead of camel case in types --- README.md | 40 +++++++++---------- .../fastmcp/direct_call_tool_result_return.py | 2 +- .../mcp_simple_pagination/server.py | 14 +++---- .../server.py | 2 +- .../mcp_simple_streamablehttp/server.py | 2 +- .../mcp_simple_task_interactive/server.py | 12 +++--- .../simple-task/mcp_simple_task/server.py | 6 +-- .../simple-tool/mcp_simple_tool/server.py | 2 +- .../mcp_sse_polling_demo/server.py | 2 +- .../__main__.py | 4 +- .../snippets/clients/completion_client.py | 14 +++---- .../snippets/clients/display_utilities.py | 2 +- .../snippets/clients/pagination_client.py | 4 +- examples/snippets/servers/completion.py | 4 +- .../servers/direct_call_tool_result.py | 2 +- examples/snippets/servers/elicitation.py | 2 +- .../lowlevel/direct_call_tool_result.py | 4 +- .../snippets/servers/lowlevel/lifespan.py | 2 +- .../servers/lowlevel/structured_output.py | 4 +- .../snippets/servers/pagination_example.py | 2 +- src/mcp/client/experimental/task_handlers.py | 2 +- src/mcp/server/fastmcp/utilities/types.py | 4 +- tests/issues/test_192_request_id.py | 4 +- .../server/lowlevel/test_server_pagination.py | 6 +-- tests/server/test_completion_with_context.py | 16 ++++---- tests/server/test_lifespan.py | 8 ++-- tests/shared/test_session.py | 2 +- 27 files changed, 84 insertions(+), 84 deletions(-) diff --git a/README.md b/README.md index ae3e73f062..8e732cf126 100644 --- a/README.md +++ b/README.md @@ -442,7 +442,7 @@ def validated_tool() -> Annotated[CallToolResult, ValidationModel]: """Return CallToolResult with structured output validation.""" return CallToolResult( content=[TextContent(type="text", text="Validated response")], - structuredContent={"status": "success", "data": {"result": 42}}, + structured_content={"status": "success", "data": {"result": 42}}, _meta={"internal": "metadata"}, ) @@ -757,8 +757,8 @@ async def run(): # List available resource templates templates = await session.list_resource_templates() print("Available resource templates:") - for template in templates.resourceTemplates: - print(f" - {template.uriTemplate}") + for template in templates.resource_templates: + print(f" - {template.uri_template}") # List available prompts prompts = await session.list_prompts() @@ -767,20 +767,20 @@ async def run(): print(f" - {prompt.name}") # Complete resource template arguments - if templates.resourceTemplates: - template = templates.resourceTemplates[0] - print(f"\nCompleting arguments for resource template: {template.uriTemplate}") + if templates.resource_templates: + template = templates.resource_templates[0] + 
print(f"\nCompleting arguments for resource template: {template.uri_template}") # Complete without context result = await session.complete( - ref=ResourceTemplateReference(type="ref/resource", uri=template.uriTemplate), + ref=ResourceTemplateReference(type="ref/resource", uri=template.uri_template), argument={"name": "owner", "value": "model"}, ) print(f"Completions for 'owner' starting with 'model': {result.completion.values}") # Complete with context - repo suggestions based on owner result = await session.complete( - ref=ResourceTemplateReference(type="ref/resource", uri=template.uriTemplate), + ref=ResourceTemplateReference(type="ref/resource", uri=template.uri_template), argument={"name": "repo", "value": ""}, context_arguments={"owner": "modelcontextprotocol"}, ) @@ -910,7 +910,7 @@ async def connect_service(service_name: str, ctx: Context[ServerSession, None]) mode="url", message=f"Authorization required to connect to {service_name}", url=f"https://{service_name}.example.com/oauth/authorize?elicit={elicitation_id}", - elicitationId=elicitation_id, + elicitation_id=elicitation_id, ) ] ) @@ -1706,7 +1706,7 @@ async def handle_list_tools() -> list[types.Tool]: types.Tool( name="query_db", description="Query the database", - inputSchema={ + input_schema={ "type": "object", "properties": {"query": {"type": "string", "description": "SQL query to execute"}}, "required": ["query"], @@ -1867,12 +1867,12 @@ async def list_tools() -> list[types.Tool]: types.Tool( name="get_weather", description="Get current weather for a city", - inputSchema={ + input_schema={ "type": "object", "properties": {"city": {"type": "string", "description": "City name"}}, "required": ["city"], }, - outputSchema={ + output_schema={ "type": "object", "properties": { "temperature": {"type": "number", "description": "Temperature in Celsius"}, @@ -1970,7 +1970,7 @@ async def list_tools() -> list[types.Tool]: types.Tool( name="advanced_tool", description="Tool with full control including _meta field", - inputSchema={ + input_schema={ "type": "object", "properties": {"message": {"type": "string"}}, "required": ["message"], @@ -1986,7 +1986,7 @@ async def handle_call_tool(name: str, arguments: dict[str, Any]) -> types.CallTo message = str(arguments.get("message", "")) return types.CallToolResult( content=[types.TextContent(type="text", text=f"Processed: {message}")], - structuredContent={"result": "success", "message": message}, + structured_content={"result": "success", "message": message}, _meta={"hidden": "data for client applications only"}, ) @@ -2062,7 +2062,7 @@ async def list_resources_paginated(request: types.ListResourcesRequest) -> types # Determine next cursor next_cursor = str(end) if end < len(ITEMS) else None - return types.ListResourcesResult(resources=page_items, nextCursor=next_cursor) + return types.ListResourcesResult(resources=page_items, next_cursor=next_cursor) ``` _Full example: [examples/snippets/servers/pagination_example.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/pagination_example.py)_ @@ -2103,8 +2103,8 @@ async def list_all_resources() -> None: print(f"Fetched {len(result.resources)} resources") # Check if there are more pages - if result.nextCursor: - cursor = result.nextCursor + if result.next_cursor: + cursor = result.next_cursor else: break @@ -2167,7 +2167,7 @@ async def handle_sampling_message( text="Hello, world! 
from model", ), model="gpt-3.5-turbo", - stopReason="endTurn", + stop_reason="endTurn", ) @@ -2205,7 +2205,7 @@ async def run(): result_unstructured = result.content[0] if isinstance(result_unstructured, types.TextContent): print(f"Tool result: {result_unstructured.text}") - result_structured = result.structuredContent + result_structured = result.structured_content print(f"Structured tool result: {result_structured}") @@ -2306,7 +2306,7 @@ async def display_resources(session: ClientSession): print(f"Resource: {display_name} ({resource.uri})") templates_response = await session.list_resource_templates() - for template in templates_response.resourceTemplates: + for template in templates_response.resource_templates: display_name = get_display_name(template) print(f"Resource Template: {display_name}") diff --git a/examples/fastmcp/direct_call_tool_result_return.py b/examples/fastmcp/direct_call_tool_result_return.py index a441769b2a..85d5a25979 100644 --- a/examples/fastmcp/direct_call_tool_result_return.py +++ b/examples/fastmcp/direct_call_tool_result_return.py @@ -20,5 +20,5 @@ class EchoResponse(BaseModel): def echo(text: str) -> Annotated[CallToolResult, EchoResponse]: """Echo the input text with structure and metadata""" return CallToolResult( - content=[TextContent(type="text", text=text)], structuredContent={"text": text}, _meta={"some": "metadata"} + content=[TextContent(type="text", text=text)], structured_content={"text": text}, _meta={"some": "metadata"} ) diff --git a/examples/servers/simple-pagination/mcp_simple_pagination/server.py b/examples/servers/simple-pagination/mcp_simple_pagination/server.py index 2412841041..f9a64919a2 100644 --- a/examples/servers/simple-pagination/mcp_simple_pagination/server.py +++ b/examples/servers/simple-pagination/mcp_simple_pagination/server.py @@ -19,7 +19,7 @@ name=f"tool_{i}", title=f"Tool {i}", description=f"This is sample tool number {i}", - inputSchema={"type": "object", "properties": {"input": {"type": "string"}}}, + input_schema={"type": "object", "properties": {"input": {"type": "string"}}}, ) for i in range(1, 26) # 25 tools total ] @@ -71,7 +71,7 @@ async def list_tools_paginated(request: types.ListToolsRequest) -> types.ListToo start_idx = int(cursor) except (ValueError, TypeError): # Invalid cursor, return empty - return types.ListToolsResult(tools=[], nextCursor=None) + return types.ListToolsResult(tools=[], next_cursor=None) # Get the page of tools page_tools = SAMPLE_TOOLS[start_idx : start_idx + page_size] @@ -81,7 +81,7 @@ async def list_tools_paginated(request: types.ListToolsRequest) -> types.ListToo if start_idx + page_size < len(SAMPLE_TOOLS): next_cursor = str(start_idx + page_size) - return types.ListToolsResult(tools=page_tools, nextCursor=next_cursor) + return types.ListToolsResult(tools=page_tools, next_cursor=next_cursor) # Paginated list_resources - returns 10 resources per page @app.list_resources() @@ -100,7 +100,7 @@ async def list_resources_paginated( start_idx = int(cursor) except (ValueError, TypeError): # Invalid cursor, return empty - return types.ListResourcesResult(resources=[], nextCursor=None) + return types.ListResourcesResult(resources=[], next_cursor=None) # Get the page of resources page_resources = SAMPLE_RESOURCES[start_idx : start_idx + page_size] @@ -110,7 +110,7 @@ async def list_resources_paginated( if start_idx + page_size < len(SAMPLE_RESOURCES): next_cursor = str(start_idx + page_size) - return types.ListResourcesResult(resources=page_resources, nextCursor=next_cursor) + return 
types.ListResourcesResult(resources=page_resources, next_cursor=next_cursor) # Paginated list_prompts - returns 7 prompts per page @app.list_prompts() @@ -129,7 +129,7 @@ async def list_prompts_paginated( start_idx = int(cursor) except (ValueError, TypeError): # Invalid cursor, return empty - return types.ListPromptsResult(prompts=[], nextCursor=None) + return types.ListPromptsResult(prompts=[], next_cursor=None) # Get the page of prompts page_prompts = SAMPLE_PROMPTS[start_idx : start_idx + page_size] @@ -139,7 +139,7 @@ async def list_prompts_paginated( if start_idx + page_size < len(SAMPLE_PROMPTS): next_cursor = str(start_idx + page_size) - return types.ListPromptsResult(prompts=page_prompts, nextCursor=next_cursor) + return types.ListPromptsResult(prompts=page_prompts, next_cursor=next_cursor) # Implement call_tool handler @app.call_tool() diff --git a/examples/servers/simple-streamablehttp-stateless/mcp_simple_streamablehttp_stateless/server.py b/examples/servers/simple-streamablehttp-stateless/mcp_simple_streamablehttp_stateless/server.py index f1b3987d28..d42bdb24e4 100644 --- a/examples/servers/simple-streamablehttp-stateless/mcp_simple_streamablehttp_stateless/server.py +++ b/examples/servers/simple-streamablehttp-stateless/mcp_simple_streamablehttp_stateless/server.py @@ -73,7 +73,7 @@ async def list_tools() -> list[types.Tool]: types.Tool( name="start-notification-stream", description=("Sends a stream of notifications with configurable count and interval"), - inputSchema={ + input_schema={ "type": "object", "required": ["interval", "count", "caller"], "properties": { diff --git a/examples/servers/simple-streamablehttp/mcp_simple_streamablehttp/server.py b/examples/servers/simple-streamablehttp/mcp_simple_streamablehttp/server.py index bfa9b23727..bb09c119f0 100644 --- a/examples/servers/simple-streamablehttp/mcp_simple_streamablehttp/server.py +++ b/examples/servers/simple-streamablehttp/mcp_simple_streamablehttp/server.py @@ -87,7 +87,7 @@ async def list_tools() -> list[types.Tool]: types.Tool( name="start-notification-stream", description=("Sends a stream of notifications with configurable count and interval"), - inputSchema={ + input_schema={ "type": "object", "required": ["interval", "count", "caller"], "properties": { diff --git a/examples/servers/simple-task-interactive/mcp_simple_task_interactive/server.py b/examples/servers/simple-task-interactive/mcp_simple_task_interactive/server.py index 4d35ca8094..9e8c86eaac 100644 --- a/examples/servers/simple-task-interactive/mcp_simple_task_interactive/server.py +++ b/examples/servers/simple-task-interactive/mcp_simple_task_interactive/server.py @@ -31,17 +31,17 @@ async def list_tools() -> list[types.Tool]: types.Tool( name="confirm_delete", description="Asks for confirmation before deleting (demonstrates elicitation)", - inputSchema={ + input_schema={ "type": "object", "properties": {"filename": {"type": "string"}}, }, - execution=types.ToolExecution(taskSupport=types.TASK_REQUIRED), + execution=types.ToolExecution(task_support=types.TASK_REQUIRED), ), types.Tool( name="write_haiku", description="Asks LLM to write a haiku (demonstrates sampling)", - inputSchema={"type": "object", "properties": {"topic": {"type": "string"}}}, - execution=types.ToolExecution(taskSupport=types.TASK_REQUIRED), + input_schema={"type": "object", "properties": {"topic": {"type": "string"}}}, + execution=types.ToolExecution(task_support=types.TASK_REQUIRED), ), ] @@ -59,7 +59,7 @@ async def work(task: ServerTaskContext) -> types.CallToolResult: result 
= await task.elicit( message=f"Are you sure you want to delete '{filename}'?", - requestedSchema={ + requested_schema={ "type": "object", "properties": {"confirm": {"type": "boolean"}}, "required": ["confirm"], @@ -121,7 +121,7 @@ async def handle_call_tool(name: str, arguments: dict[str, Any]) -> types.CallTo else: return types.CallToolResult( content=[types.TextContent(type="text", text=f"Unknown tool: {name}")], - isError=True, + is_error=True, ) diff --git a/examples/servers/simple-task/mcp_simple_task/server.py b/examples/servers/simple-task/mcp_simple_task/server.py index d0681b8423..ba0d962de1 100644 --- a/examples/servers/simple-task/mcp_simple_task/server.py +++ b/examples/servers/simple-task/mcp_simple_task/server.py @@ -26,8 +26,8 @@ async def list_tools() -> list[types.Tool]: types.Tool( name="long_running_task", description="A task that takes a few seconds to complete with status updates", - inputSchema={"type": "object", "properties": {}}, - execution=types.ToolExecution(taskSupport=types.TASK_REQUIRED), + input_schema={"type": "object", "properties": {}}, + execution=types.ToolExecution(task_support=types.TASK_REQUIRED), ) ] @@ -60,7 +60,7 @@ async def handle_call_tool(name: str, arguments: dict[str, Any]) -> types.CallTo else: return types.CallToolResult( content=[types.TextContent(type="text", text=f"Unknown tool: {name}")], - isError=True, + is_error=True, ) diff --git a/examples/servers/simple-tool/mcp_simple_tool/server.py b/examples/servers/simple-tool/mcp_simple_tool/server.py index 5b2b7d068d..a9a40f4d68 100644 --- a/examples/servers/simple-tool/mcp_simple_tool/server.py +++ b/examples/servers/simple-tool/mcp_simple_tool/server.py @@ -44,7 +44,7 @@ async def list_tools() -> list[types.Tool]: name="fetch", title="Website Fetcher", description="Fetches a website and returns its content", - inputSchema={ + input_schema={ "type": "object", "required": ["url"], "properties": { diff --git a/examples/servers/sse-polling-demo/mcp_sse_polling_demo/server.py b/examples/servers/sse-polling-demo/mcp_sse_polling_demo/server.py index e4bdcaa396..6a5c714361 100644 --- a/examples/servers/sse-polling-demo/mcp_sse_polling_demo/server.py +++ b/examples/servers/sse-polling-demo/mcp_sse_polling_demo/server.py @@ -120,7 +120,7 @@ async def list_tools() -> list[types.Tool]: "Process a batch of items with periodic checkpoints. " "Demonstrates SSE polling where server closes stream periodically." 
), - inputSchema={ + input_schema={ "type": "object", "properties": { "items": { diff --git a/examples/servers/structured-output-lowlevel/mcp_structured_output_lowlevel/__main__.py b/examples/servers/structured-output-lowlevel/mcp_structured_output_lowlevel/__main__.py index 7f102ff8b5..d730d1daf6 100644 --- a/examples/servers/structured-output-lowlevel/mcp_structured_output_lowlevel/__main__.py +++ b/examples/servers/structured-output-lowlevel/mcp_structured_output_lowlevel/__main__.py @@ -27,12 +27,12 @@ async def list_tools() -> list[types.Tool]: types.Tool( name="get_weather", description="Get weather information (simulated)", - inputSchema={ + input_schema={ "type": "object", "properties": {"city": {"type": "string", "description": "City name"}}, "required": ["city"], }, - outputSchema={ + output_schema={ "type": "object", "properties": { "temperature": {"type": "number"}, diff --git a/examples/snippets/clients/completion_client.py b/examples/snippets/clients/completion_client.py index 8c5615926e..1d2aea1ae7 100644 --- a/examples/snippets/clients/completion_client.py +++ b/examples/snippets/clients/completion_client.py @@ -28,8 +28,8 @@ async def run(): # List available resource templates templates = await session.list_resource_templates() print("Available resource templates:") - for template in templates.resourceTemplates: - print(f" - {template.uriTemplate}") + for template in templates.resource_templates: + print(f" - {template.uri_template}") # List available prompts prompts = await session.list_prompts() @@ -38,20 +38,20 @@ async def run(): print(f" - {prompt.name}") # Complete resource template arguments - if templates.resourceTemplates: - template = templates.resourceTemplates[0] - print(f"\nCompleting arguments for resource template: {template.uriTemplate}") + if templates.resource_templates: + template = templates.resource_templates[0] + print(f"\nCompleting arguments for resource template: {template.uri_template}") # Complete without context result = await session.complete( - ref=ResourceTemplateReference(type="ref/resource", uri=template.uriTemplate), + ref=ResourceTemplateReference(type="ref/resource", uri=template.uri_template), argument={"name": "owner", "value": "model"}, ) print(f"Completions for 'owner' starting with 'model': {result.completion.values}") # Complete with context - repo suggestions based on owner result = await session.complete( - ref=ResourceTemplateReference(type="ref/resource", uri=template.uriTemplate), + ref=ResourceTemplateReference(type="ref/resource", uri=template.uri_template), argument={"name": "repo", "value": ""}, context_arguments={"owner": "modelcontextprotocol"}, ) diff --git a/examples/snippets/clients/display_utilities.py b/examples/snippets/clients/display_utilities.py index 5f1d50510d..047e821c3d 100644 --- a/examples/snippets/clients/display_utilities.py +++ b/examples/snippets/clients/display_utilities.py @@ -39,7 +39,7 @@ async def display_resources(session: ClientSession): print(f"Resource: {display_name} ({resource.uri})") templates_response = await session.list_resource_templates() - for template in templates_response.resourceTemplates: + for template in templates_response.resource_templates: display_name = get_display_name(template) print(f"Resource Template: {display_name}") diff --git a/examples/snippets/clients/pagination_client.py b/examples/snippets/clients/pagination_client.py index 1805d2d315..fd266e4623 100644 --- a/examples/snippets/clients/pagination_client.py +++ b/examples/snippets/clients/pagination_client.py @@ 
-29,8 +29,8 @@ async def list_all_resources() -> None: print(f"Fetched {len(result.resources)} resources") # Check if there are more pages - if result.nextCursor: - cursor = result.nextCursor + if result.next_cursor: + cursor = result.next_cursor else: break diff --git a/examples/snippets/servers/completion.py b/examples/snippets/servers/completion.py index 2a31541ddc..d7626f0b4b 100644 --- a/examples/snippets/servers/completion.py +++ b/examples/snippets/servers/completion.py @@ -36,7 +36,7 @@ async def handle_completion( languages = ["python", "javascript", "typescript", "go", "rust"] return Completion( values=[lang for lang in languages if lang.startswith(argument.value)], - hasMore=False, + has_more=False, ) # Complete repository names for GitHub resources @@ -44,6 +44,6 @@ async def handle_completion( if ref.uri == "github://repos/{owner}/{repo}" and argument.name == "repo": if context and context.arguments and context.arguments.get("owner") == "modelcontextprotocol": repos = ["python-sdk", "typescript-sdk", "specification"] - return Completion(values=repos, hasMore=False) + return Completion(values=repos, has_more=False) return None diff --git a/examples/snippets/servers/direct_call_tool_result.py b/examples/snippets/servers/direct_call_tool_result.py index 54d49b2f66..3dfff91f12 100644 --- a/examples/snippets/servers/direct_call_tool_result.py +++ b/examples/snippets/servers/direct_call_tool_result.py @@ -31,7 +31,7 @@ def validated_tool() -> Annotated[CallToolResult, ValidationModel]: """Return CallToolResult with structured output validation.""" return CallToolResult( content=[TextContent(type="text", text="Validated response")], - structuredContent={"status": "success", "data": {"result": 42}}, + structured_content={"status": "success", "data": {"result": 42}}, _meta={"internal": "metadata"}, ) diff --git a/examples/snippets/servers/elicitation.py b/examples/snippets/servers/elicitation.py index a1a65fb32c..34921aa4b3 100644 --- a/examples/snippets/servers/elicitation.py +++ b/examples/snippets/servers/elicitation.py @@ -93,7 +93,7 @@ async def connect_service(service_name: str, ctx: Context[ServerSession, None]) mode="url", message=f"Authorization required to connect to {service_name}", url=f"https://{service_name}.example.com/oauth/authorize?elicit={elicitation_id}", - elicitationId=elicitation_id, + elicitation_id=elicitation_id, ) ] ) diff --git a/examples/snippets/servers/lowlevel/direct_call_tool_result.py b/examples/snippets/servers/lowlevel/direct_call_tool_result.py index 496eaad105..4c83abd32f 100644 --- a/examples/snippets/servers/lowlevel/direct_call_tool_result.py +++ b/examples/snippets/servers/lowlevel/direct_call_tool_result.py @@ -21,7 +21,7 @@ async def list_tools() -> list[types.Tool]: types.Tool( name="advanced_tool", description="Tool with full control including _meta field", - inputSchema={ + input_schema={ "type": "object", "properties": {"message": {"type": "string"}}, "required": ["message"], @@ -37,7 +37,7 @@ async def handle_call_tool(name: str, arguments: dict[str, Any]) -> types.CallTo message = str(arguments.get("message", "")) return types.CallToolResult( content=[types.TextContent(type="text", text=f"Processed: {message}")], - structuredContent={"result": "success", "message": message}, + structured_content={"result": "success", "message": message}, _meta={"hidden": "data for client applications only"}, ) diff --git a/examples/snippets/servers/lowlevel/lifespan.py b/examples/snippets/servers/lowlevel/lifespan.py index ada3731224..2ae7c10358 100644 
--- a/examples/snippets/servers/lowlevel/lifespan.py
+++ b/examples/snippets/servers/lowlevel/lifespan.py
@@ -56,7 +56,7 @@ async def handle_list_tools() -> list[types.Tool]:
         types.Tool(
             name="query_db",
             description="Query the database",
-            inputSchema={
+            input_schema={
                 "type": "object",
                 "properties": {"query": {"type": "string", "description": "SQL query to execute"}},
                 "required": ["query"],
diff --git a/examples/snippets/servers/lowlevel/structured_output.py b/examples/snippets/servers/lowlevel/structured_output.py
index 0237c9ab31..6bc4384a61 100644
--- a/examples/snippets/servers/lowlevel/structured_output.py
+++ b/examples/snippets/servers/lowlevel/structured_output.py
@@ -21,12 +21,12 @@ async def list_tools() -> list[types.Tool]:
         types.Tool(
             name="get_weather",
             description="Get current weather for a city",
-            inputSchema={
+            input_schema={
                 "type": "object",
                 "properties": {"city": {"type": "string", "description": "City name"}},
                 "required": ["city"],
             },
-            outputSchema={
+            output_schema={
                 "type": "object",
                 "properties": {
                     "temperature": {"type": "number", "description": "Temperature in Celsius"},
diff --git a/examples/snippets/servers/pagination_example.py b/examples/snippets/servers/pagination_example.py
index d62ee59316..aa67750b54 100644
--- a/examples/snippets/servers/pagination_example.py
+++ b/examples/snippets/servers/pagination_example.py
@@ -33,4 +33,4 @@ async def list_resources_paginated(request: types.ListResourcesRequest) -> types
     # Determine next cursor
     next_cursor = str(end) if end < len(ITEMS) else None
 
-    return types.ListResourcesResult(resources=page_items, nextCursor=next_cursor)
+    return types.ListResourcesResult(resources=page_items, next_cursor=next_cursor)
diff --git a/src/mcp/client/experimental/task_handlers.py b/src/mcp/client/experimental/task_handlers.py
index a47508674b..c6e8957f03 100644
--- a/src/mcp/client/experimental/task_handlers.py
+++ b/src/mcp/client/experimental/task_handlers.py
@@ -225,7 +225,7 @@ def build_capability(self) -> types.ClientTasksCapability | None:
         requests_capability: types.ClientTasksRequestsCapability | None = None
         if has_sampling or has_elicitation:
             requests_capability = types.ClientTasksRequestsCapability(
-                sampling=types.TasksSamplingCapability(createMessage=types.TasksCreateMessageCapability())
+                sampling=types.TasksSamplingCapability(create_message=types.TasksCreateMessageCapability())
                 if has_sampling
                 else None,
                 elicitation=types.TasksElicitationCapability(create=types.TasksCreateElicitationCapability())
diff --git a/src/mcp/server/fastmcp/utilities/types.py b/src/mcp/server/fastmcp/utilities/types.py
index d6928ca3f8..a1445de196 100644
--- a/src/mcp/server/fastmcp/utilities/types.py
+++ b/src/mcp/server/fastmcp/utilities/types.py
@@ -51,7 +51,7 @@ def to_image_content(self) -> ImageContent:
         else:  # pragma: no cover
             raise ValueError("No image data available")
 
-        return ImageContent(type="image", data=data, mimeType=self._mime_type)
+        return ImageContent(type="image", data=data, mime_type=self._mime_type)
 
 
 class Audio:
@@ -98,4 +98,4 @@ def to_audio_content(self) -> AudioContent:
         else:  # pragma: no cover
             raise ValueError("No audio data available")
 
-        return AudioContent(type="audio", data=data, mimeType=self._mime_type)
+        return AudioContent(type="audio", data=data, mime_type=self._mime_type)
diff --git a/tests/issues/test_192_request_id.py b/tests/issues/test_192_request_id.py
index 3762b092bd..ca4a95e5d9 100644
--- a/tests/issues/test_192_request_id.py
+++ b/tests/issues/test_192_request_id.py
@@ -59,9 +59,9 @@ async def run_server():
             id="init-1",
             method="initialize",
             params=InitializeRequestParams(
-                protocolVersion=LATEST_PROTOCOL_VERSION,
+                protocol_version=LATEST_PROTOCOL_VERSION,
                 capabilities=ClientCapabilities(),
-                clientInfo=Implementation(name="test-client", version="1.0.0"),
+                client_info=Implementation(name="test-client", version="1.0.0"),
             ).model_dump(by_alias=True, exclude_none=True),
             jsonrpc="2.0",
         )
diff --git a/tests/server/lowlevel/test_server_pagination.py b/tests/server/lowlevel/test_server_pagination.py
index 8d64dd5253..081fb262ab 100644
--- a/tests/server/lowlevel/test_server_pagination.py
+++ b/tests/server/lowlevel/test_server_pagination.py
@@ -25,7 +25,7 @@ async def test_list_prompts_pagination() -> None:
     async def handle_list_prompts(request: ListPromptsRequest) -> ListPromptsResult:
         nonlocal received_request
         received_request = request
-        return ListPromptsResult(prompts=[], nextCursor="next")
+        return ListPromptsResult(prompts=[], next_cursor="next")
 
     handler = server.request_handlers[ListPromptsRequest]
 
@@ -57,7 +57,7 @@ async def test_list_resources_pagination() -> None:
    async def handle_list_resources(request: ListResourcesRequest) -> ListResourcesResult:
         nonlocal received_request
         received_request = request
-        return ListResourcesResult(resources=[], nextCursor="next")
+        return ListResourcesResult(resources=[], next_cursor="next")
 
     handler = server.request_handlers[ListResourcesRequest]
 
@@ -91,7 +91,7 @@ async def test_list_tools_pagination() -> None:
     async def handle_list_tools(request: ListToolsRequest) -> ListToolsResult:
         nonlocal received_request
         received_request = request
-        return ListToolsResult(tools=[], nextCursor="next")
+        return ListToolsResult(tools=[], next_cursor="next")
 
     handler = server.request_handlers[ListToolsRequest]
 
diff --git a/tests/server/test_completion_with_context.py b/tests/server/test_completion_with_context.py
index eb9604791a..c59916ef22 100644
--- a/tests/server/test_completion_with_context.py
+++ b/tests/server/test_completion_with_context.py
@@ -36,7 +36,7 @@ async def handle_completion(
         received_args["context"] = context
 
         # Return test completion
-        return Completion(values=["test-completion"], total=1, hasMore=False)
+        return Completion(values=["test-completion"], total=1, has_more=False)
 
     async with create_connected_server_and_client_session(server) as client:
         # Test with context
@@ -68,7 +68,7 @@ async def handle_completion(
         nonlocal context_was_none
         context_was_none = context is None
 
-        return Completion(values=["no-context-completion"], total=1, hasMore=False)
+        return Completion(values=["no-context-completion"], total=1, has_more=False)
 
     async with create_connected_server_and_client_session(server) as client:
         # Test without context
@@ -97,17 +97,17 @@ async def handle_completion(
         if ref.uri == "db://{database}/{table}":
             if argument.name == "database":
                 # Complete database names
-                return Completion(values=["users_db", "products_db", "analytics_db"], total=3, hasMore=False)
+                return Completion(values=["users_db", "products_db", "analytics_db"], total=3, has_more=False)
             elif argument.name == "table":
                 # Complete table names based on selected database
                 if context and context.arguments:
                     db = context.arguments.get("database")
                     if db == "users_db":
-                        return Completion(values=["users", "sessions", "permissions"], total=3, hasMore=False)
+                        return Completion(values=["users", "sessions", "permissions"], total=3, has_more=False)
                     elif db == "products_db":  # pragma: no cover
-                        return Completion(values=["products", "categories", "inventory"], total=3, hasMore=False)
+ return Completion(values=["products", "categories", "inventory"], total=3, has_more=False) - return Completion(values=[], total=0, hasMore=False) # pragma: no cover + return Completion(values=[], total=0, has_more=False) # pragma: no cover async with create_connected_server_and_client_session(server) as client: # First, complete database @@ -156,9 +156,9 @@ async def handle_completion( # Normal completion if context is provided db = context.arguments.get("database") if db == "test_db": # pragma: no cover - return Completion(values=["users", "orders", "products"], total=3, hasMore=False) + return Completion(values=["users", "orders", "products"], total=3, has_more=False) - return Completion(values=[], total=0, hasMore=False) # pragma: no cover + return Completion(values=[], total=0, has_more=False) # pragma: no cover async with create_connected_server_and_client_session(server) as client: # Try to complete table without database context - should raise error diff --git a/tests/server/test_lifespan.py b/tests/server/test_lifespan.py index 9d73fd47a0..1382785942 100644 --- a/tests/server/test_lifespan.py +++ b/tests/server/test_lifespan.py @@ -76,9 +76,9 @@ async def run_server(): # Initialize the server params = InitializeRequestParams( - protocolVersion="2024-11-05", + protocol_version="2024-11-05", capabilities=ClientCapabilities(), - clientInfo=Implementation(name="test-client", version="0.1.0"), + client_info=Implementation(name="test-client", version="0.1.0"), ) await send_stream1.send( SessionMessage( @@ -182,9 +182,9 @@ async def run_server(): # Initialize the server params = InitializeRequestParams( - protocolVersion="2024-11-05", + protocol_version="2024-11-05", capabilities=ClientCapabilities(), - clientInfo=Implementation(name="test-client", version="0.1.0"), + client_info=Implementation(name="test-client", version="0.1.0"), ) await send_stream1.send( SessionMessage( diff --git a/tests/shared/test_session.py b/tests/shared/test_session.py index 13ebf0e704..bdb7052843 100644 --- a/tests/shared/test_session.py +++ b/tests/shared/test_session.py @@ -118,7 +118,7 @@ async def make_request(client_session: ClientSession): await client_session.send_notification( ClientNotification( CancelledNotification( - params=CancelledNotificationParams(requestId=request_id), + params=CancelledNotificationParams(request_id=request_id), ) ) )
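
Reviewer note: every hunk in this patch renames only the Python-side attribute or keyword argument; the JSON wire format keeps its camelCase keys through pydantic field aliases, which is why the tests retain .model_dump(by_alias=True, exclude_none=True). Below is a minimal sketch of that round-trip, assuming pydantic v2 with populate_by_name as the SDK's base types use; the ExampleResult model is hypothetical, for illustration only, not part of the SDK.

    # Hypothetical model showing the snake_case-name / camelCase-alias pattern.
    from pydantic import BaseModel, ConfigDict, Field

    class ExampleResult(BaseModel):
        # populate_by_name lets callers pass the snake_case field name directly.
        model_config = ConfigDict(populate_by_name=True)

        # The alias keeps the camelCase key on the wire.
        next_cursor: str | None = Field(default=None, alias="nextCursor")

    result = ExampleResult(next_cursor="page-2")  # snake_case in Python code
    # Serializing by alias emits the protocol's camelCase key:
    assert result.model_dump(by_alias=True, exclude_none=True) == {"nextCursor": "page-2"}
    # Incoming protocol JSON still validates through the alias:
    assert ExampleResult.model_validate({"nextCursor": "page-2"}).next_cursor == "page-2"

Under this assumption, the rename is source-breaking for Python callers but invisible on the protocol, which matches what the updated tests assert.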