```
├── .git-blame-ignore-revs
├── .gitattribute
├── .github/
├── ISSUE_TEMPLATE/
├── bug.yaml (300 tokens)
├── config.yaml
├── feature-request.yaml (200 tokens)
├── question.yaml (200 tokens)
├── workflows/
├── main-checks.yml
├── publish-docs-manually.yml (200 tokens)
├── publish-pypi.yml (400 tokens)
├── pull-request-checks.yml
├── shared.yml (400 tokens)
├── .gitignore (600 tokens)
├── .pre-commit-config.yaml (300 tokens)
├── CLAUDE.md (800 tokens)
├── CODE_OF_CONDUCT.md (1000 tokens)
├── CONTRIBUTING.md (400 tokens)
├── LICENSE (omitted)
├── README.md (16.7k tokens)
├── RELEASE.md (100 tokens)
├── SECURITY.md (200 tokens)
├── docs/
├── api.md
├── authorization.md
├── concepts.md (100 tokens)
├── index.md (400 tokens)
├── installation.md (300 tokens)
├── low-level-server.md
├── testing.md (500 tokens)
├── examples/
├── README.md
├── clients/
├── simple-auth-client/
├── README.md (300 tokens)
├── mcp_simple_auth_client/
├── __init__.py
├── main.py (2.6k tokens)
├── pyproject.toml (200 tokens)
├── simple-chatbot/
├── .python-version
├── README.MD (700 tokens)
├── mcp_simple_chatbot/
├── .env.example
├── main.py (3k tokens)
├── requirements.txt
├── servers_config.json
├── test.db
├── pyproject.toml (200 tokens)
├── fastmcp/
├── complex_inputs.py (100 tokens)
├── desktop.py (100 tokens)
├── direct_call_tool_result_return.py (100 tokens)
├── echo.py (100 tokens)
├── icons_demo.py (300 tokens)
├── logging_and_progress.py (200 tokens)
├── mcp.png
├── memory.py (2.1k tokens)
├── parameter_descriptions.py (100 tokens)
├── readme-quickstart.py (100 tokens)
├── screenshot.py (200 tokens)
├── simple_echo.py
├── text_me.py (400 tokens)
├── unicode_example.py (300 tokens)
├── weather_structured.py (1500 tokens)
├── servers/
├── simple-auth/
├── README.md (800 tokens)
├── mcp_simple_auth/
├── __init__.py
├── __main__.py
├── auth_server.py (1200 tokens)
├── legacy_as_server.py (900 tokens)
├── server.py (1100 tokens)
├── simple_auth_provider.py (2.1k tokens)
├── token_verifier.py (800 tokens)
├── pyproject.toml (200 tokens)
├── simple-pagination/
├── README.md (500 tokens)
├── mcp_simple_pagination/
├── __init__.py
├── __main__.py
├── server.py (1500 tokens)
├── pyproject.toml (300 tokens)
├── simple-prompt/
├── .python-version
├── README.md (300 tokens)
├── mcp_simple_prompt/
├── __init__.py
├── __main__.py
├── server.py (700 tokens)
├── pyproject.toml (200 tokens)
├── simple-resource/
├── .python-version
├── README.md (200 tokens)
├── mcp_simple_resource/
├── __init__.py
├── __main__.py
├── server.py (600 tokens)
├── pyproject.toml (200 tokens)
├── simple-streamablehttp-stateless/
├── README.md (300 tokens)
├── mcp_simple_streamablehttp_stateless/
├── __init__.py
├── __main__.py
├── server.py (900 tokens)
├── pyproject.toml (200 tokens)
├── simple-streamablehttp/
├── README.md (400 tokens)
├── mcp_simple_streamablehttp/
├── __init__.py
├── __main__.py
├── event_store.py (700 tokens)
├── server.py (1200 tokens)
├── pyproject.toml (200 tokens)
├── simple-tool/
├── .python-version
├── README.md (200 tokens)
├── mcp_simple_tool/
├── __init__.py
├── __main__.py
├── server.py (600 tokens)
├── pyproject.toml (200 tokens)
├── structured-output-lowlevel/
├── mcp_structured_output_lowlevel/
├── __init__.py
├── __main__.py (600 tokens)
├── pyproject.toml
├── snippets/
├── clients/
├── __init__.py
├── completion_client.py (600 tokens)
├── display_utilities.py (400 tokens)
├── oauth_client.py (500 tokens)
├── pagination_client.py (300 tokens)
├── parsing_tool_results.py (500 tokens)
├── stdio_client.py (600 tokens)
├── streamable_basic.py (200 tokens)
├── pyproject.toml (100 tokens)
├── servers/
├── __init__.py (300 tokens)
├── basic_prompt.py (100 tokens)
├── basic_resource.py (100 tokens)
├── basic_tool.py (100 tokens)
├── completion.py (300 tokens)
├── direct_call_tool_result.py (200 tokens)
├── direct_execution.py (100 tokens)
├── elicitation.py (300 tokens)
├── fastmcp_quickstart.py (200 tokens)
├── images.py (100 tokens)
├── lifespan_example.py (300 tokens)
├── lowlevel/
├── __init__.py
├── basic.py (400 tokens)
├── direct_call_tool_result.py (400 tokens)
├── lifespan.py (600 tokens)
├── structured_output.py (600 tokens)
├── notifications.py (100 tokens)
├── oauth_server.py (300 tokens)
├── pagination_example.py (200 tokens)
├── sampling.py (100 tokens)
├── streamable_config.py (200 tokens)
├── streamable_http_basic_mounting.py (100 tokens)
├── streamable_http_host_mounting.py (100 tokens)
├── streamable_http_multiple_servers.py (200 tokens)
├── streamable_http_path_config.py (200 tokens)
├── streamable_starlette_mount.py (300 tokens)
├── structured_output.py (500 tokens)
├── tool_progress.py (100 tokens)
├── mkdocs.yml (700 tokens)
├── pyproject.toml (1000 tokens)
├── scripts/
├── update_readme_snippets.py (1000 tokens)
├── src/
├── mcp/
├── __init__.py (500 tokens)
├── cli/
├── __init__.py
├── claude.py (1000 tokens)
├── cli.py (3k tokens)
├── client/
├── __init__.py
├── __main__.py (500 tokens)
├── auth/
├── __init__.py (100 tokens)
├── extensions/
├── __init__.py
├── client_credentials.py (1400 tokens)
├── oauth2.py (5.8k tokens)
├── session.py (4.1k tokens)
├── session_group.py (2.8k tokens)
├── sse.py (1400 tokens)
├── stdio/
├── __init__.py (1800 tokens)
├── streamable_http.py (4k tokens)
├── websocket.py (700 tokens)
├── os/
├── __init__.py
├── posix/
├── __init__.py
├── utilities.py (400 tokens)
├── win32/
├── __init__.py
├── utilities.py (2.2k tokens)
├── py.typed
├── server/
├── __init__.py
├── __main__.py (300 tokens)
├── auth/
├── __init__.py
├── errors.py
├── handlers/
├── __init__.py
├── authorize.py (1900 tokens)
├── metadata.py (200 tokens)
├── register.py (1100 tokens)
├── revoke.py (600 tokens)
├── token.py (2k tokens)
├── json_response.py (100 tokens)
├── middleware/
├── __init__.py
├── auth_context.py (300 tokens)
├── bearer_auth.py (900 tokens)
├── client_auth.py (400 tokens)
├── provider.py (2000 tokens)
├── routes.py (1800 tokens)
├── settings.py (200 tokens)
├── elicitation.py (800 tokens)
├── fastmcp/
├── __init__.py (100 tokens)
├── exceptions.py (100 tokens)
├── prompts/
├── __init__.py
├── base.py (1400 tokens)
├── manager.py (400 tokens)
├── resources/
├── __init__.py (100 tokens)
├── base.py (300 tokens)
├── resource_manager.py (800 tokens)
├── templates.py (900 tokens)
├── types.py (1400 tokens)
├── server.py (9.6k tokens)
├── tools/
├── __init__.py
├── base.py (900 tokens)
├── tool_manager.py (600 tokens)
├── utilities/
├── __init__.py
├── context_injection.py (400 tokens)
├── func_metadata.py (4.5k tokens)
├── logging.py (200 tokens)
├── types.py (700 tokens)
├── lowlevel/
├── __init__.py
├── func_inspection.py (500 tokens)
├── helper_types.py
├── server.py (5.9k tokens)
├── models.py (100 tokens)
├── session.py (2.5k tokens)
├── sse.py (2.2k tokens)
├── stdio.py (700 tokens)
├── streamable_http.py (7.7k tokens)
├── streamable_http_manager.py (2.2k tokens)
├── streaming_asgi_transport.py (1500 tokens)
├── transport_security.py (900 tokens)
├── websocket.py (500 tokens)
├── shared/
├── __init__.py
├── _httpx_utils.py (500 tokens)
├── auth.py (1300 tokens)
├── auth_utils.py (500 tokens)
├── context.py (100 tokens)
├── exceptions.py (100 tokens)
├── memory.py (800 tokens)
├── message.py (200 tokens)
├── metadata_utils.py (300 tokens)
├── progress.py (300 tokens)
├── session.py (4.1k tokens)
├── version.py
├── types.py (8.9k tokens)
├── tests/
├── __init__.py
├── cli/
├── __init__.py
├── test_utils.py (600 tokens)
├── client/
├── __init__.py
├── auth/
├── extensions/
├── test_client_credentials.py (1400 tokens)
├── conftest.py (1000 tokens)
├── test_auth.py (10.9k tokens)
├── test_config.py (500 tokens)
├── test_http_unicode.py (1700 tokens)
├── test_list_methods_cursor.py (1500 tokens)
├── test_list_roots_callback.py (400 tokens)
├── test_logging_callback.py (500 tokens)
├── test_notification_response.py (1000 tokens)
├── test_output_schema_validation.py (1700 tokens)
├── test_resource_cleanup.py (500 tokens)
├── test_sampling_callback.py (400 tokens)
├── test_session.py (5.2k tokens)
├── test_session_group.py (3.4k tokens)
├── test_stdio.py (4.6k tokens)
├── conftest.py
├── issues/
├── test_100_tool_listing.py (200 tokens)
├── test_1027_win_unreachable_cleanup.py (1900 tokens)
├── test_129_resource_templates.py (300 tokens)
├── test_1338_icons_and_metadata.py (900 tokens)
├── test_141_resource_templates.py (1000 tokens)
├── test_152_resource_mime_type.py (1000 tokens)
├── test_176_progress_token.py (300 tokens)
├── test_188_concurrency.py (600 tokens)
├── test_192_request_id.py (700 tokens)
├── test_342_base64_encoding.py (600 tokens)
├── test_355_type_error.py (200 tokens)
├── test_552_windows_hang.py (400 tokens)
├── test_88_random_error.py (1000 tokens)
├── test_malformed_input.py (1300 tokens)
├── server/
├── __init__.py
├── auth/
├── middleware/
├── test_auth_context.py (700 tokens)
├── test_bearer_auth.py (3.4k tokens)
├── test_error_handling.py (2.2k tokens)
├── test_protected_resource.py (1600 tokens)
├── test_provider.py (700 tokens)
├── fastmcp/
├── __init__.py
├── auth/
├── __init__.py
├── test_auth_integration.py (10.1k tokens)
├── prompts/
├── __init__.py
├── test_base.py (1300 tokens)
├── test_manager.py (700 tokens)
├── resources/
├── __init__.py
├── test_file_resources.py (800 tokens)
├── test_function_resources.py (900 tokens)
├── test_resource_manager.py (900 tokens)
├── test_resource_template.py (1600 tokens)
├── test_resources.py (1200 tokens)
├── servers/
├── __init__.py
├── test_file_server.py (700 tokens)
├── test_elicitation.py (2.2k tokens)
├── test_func_metadata.py (8.2k tokens)
├── test_integration.py (5.3k tokens)
├── test_parameter_descriptions.py (200 tokens)
├── test_server.py (10.6k tokens)
├── test_title.py (1600 tokens)
├── test_tool_manager.py (6.2k tokens)
├── lowlevel/
├── __init__.py
├── test_func_inspection.py (2.1k tokens)
├── test_server_listing.py (1100 tokens)
├── test_server_pagination.py (800 tokens)
├── test_cancel_handling.py (700 tokens)
├── test_completion_with_context.py (1500 tokens)
├── test_lifespan.py (1600 tokens)
├── test_lowlevel_exception_handling.py (500 tokens)
├── test_lowlevel_input_validation.py (2.3k tokens)
├── test_lowlevel_output_validation.py (3.3k tokens)
├── test_lowlevel_tool_annotations.py (700 tokens)
├── test_read_resource.py (700 tokens)
├── test_session.py (2.4k tokens)
├── test_session_race_condition.py (1200 tokens)
├── test_sse_security.py (2.2k tokens)
├── test_stdio.py (500 tokens)
├── test_streamable_http_manager.py (1900 tokens)
├── test_streamable_http_security.py (2.1k tokens)
├── shared/
├── test_auth.py (500 tokens)
├── test_auth_utils.py (1300 tokens)
├── test_httpx_utils.py (100 tokens)
├── test_memory.py (200 tokens)
├── test_progress_notifications.py (2.9k tokens)
├── test_session.py (1200 tokens)
├── test_sse.py (3.5k tokens)
├── test_streamable_http.py (11.7k tokens)
├── test_win32_utils.py (100 tokens)
├── test_ws.py (1300 tokens)
├── test_examples.py (900 tokens)
├── test_helpers.py (200 tokens)
├── test_types.py (400 tokens)
├── uv.lock (omitted)
```
## /.git-blame-ignore-revs
```git-blame-ignore-revs path="/.git-blame-ignore-revs"
# Applied 120 line-length rule to all files: https://github.com/modelcontextprotocol/python-sdk/pull/856
543961968c0634e93d919d509cce23a1d6a56c21
```
## /.gitattribute
```gitattribute path="/.gitattribute"
# Generated
uv.lock linguist-generated=true
```
## /.github/ISSUE_TEMPLATE/bug.yaml
```yaml path="/.github/ISSUE_TEMPLATE/bug.yaml"
name: 🐛 MCP Python SDK Bug
description: Report a bug or unexpected behavior in the MCP Python SDK
labels: ["need confirmation"]
body:
- type: markdown
attributes:
value: Thank you for contributing to the MCP Python SDK! ✊
- type: checkboxes
id: checks
attributes:
label: Initial Checks
description: Just making sure you're using the latest version of MCP Python SDK.
options:
- label: I confirm that I'm using the latest version of MCP Python SDK
required: true
- label: I confirm that I searched for my issue in https://github.com/modelcontextprotocol/python-sdk/issues before opening this issue
required: true
- type: textarea
id: description
attributes:
label: Description
description: |
Please explain what you're seeing and what you would expect to see.
Please provide as much detail as possible to make understanding and solving your problem as quick as possible. 🙏
validations:
required: true
- type: textarea
id: example
attributes:
label: Example Code
description: >
If applicable, please add a self-contained,
[minimal, reproducible, example](https://stackoverflow.com/help/minimal-reproducible-example)
demonstrating the bug.
placeholder: |
from mcp.server.fastmcp import FastMCP
...
render: Python
- type: textarea
id: version
attributes:
label: Python & MCP Python SDK
description: |
Which version of Python and MCP Python SDK are you using?
render: Text
validations:
required: true
```
## /.github/ISSUE_TEMPLATE/config.yaml
```yaml path="/.github/ISSUE_TEMPLATE/config.yaml"
blank_issues_enabled: false
```
## /.github/ISSUE_TEMPLATE/feature-request.yaml
```yaml path="/.github/ISSUE_TEMPLATE/feature-request.yaml"
name: 🚀 MCP Python SDK Feature Request
description: "Suggest a new feature for the MCP Python SDK"
labels: ["feature request"]
body:
- type: markdown
attributes:
value: Thank you for contributing to the MCP Python SDK! ✊
- type: textarea
id: description
attributes:
label: Description
description: |
Please give as much detail as possible about the feature you would like to suggest. 🙏
You might like to add:
* A demo of how code might look when using the feature
* Your use case(s) for the feature
* Reference to other projects that have a similar feature
validations:
required: true
- type: textarea
id: references
attributes:
label: References
description: |
Please add any links or references that might help us understand your feature request better. 📚
```
## /.github/ISSUE_TEMPLATE/question.yaml
```yaml path="/.github/ISSUE_TEMPLATE/question.yaml"
name: ❓ MCP Python SDK Question
description: "Ask a question about the MCP Python SDK"
labels: ["question"]
body:
- type: markdown
attributes:
value: Thank you for reaching out to the MCP Python SDK community! We're here to help! 🤝
- type: textarea
id: question
attributes:
label: Question
description: |
Please provide as much detail as possible about your question. 🙏
You might like to include:
* Code snippets showing what you've tried
* Error messages you're encountering (if any)
* Expected vs actual behavior
* Your use case and what you're trying to achieve
validations:
required: true
- type: textarea
id: context
attributes:
label: Additional Context
description: |
Please provide any additional context that might help us better understand your question, such as:
* Your MCP Python SDK version
* Your Python version
* Relevant configuration or environment details 📝
```
## /.github/workflows/main-checks.yml
```yml path="/.github/workflows/main-checks.yml"
name: Main branch checks
on:
push:
branches:
- main
- "v*.*.*"
tags:
- "v*.*.*"
jobs:
checks:
uses: ./.github/workflows/shared.yml
```
## /.github/workflows/publish-docs-manually.yml
```yml path="/.github/workflows/publish-docs-manually.yml"
name: Publish Docs manually
on:
workflow_dispatch:
jobs:
docs-publish:
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- uses: actions/checkout@v4
- name: Configure Git Credentials
run: |
git config user.name github-actions[bot]
git config user.email 41898282+github-actions[bot]@users.noreply.github.com
- name: Install uv
uses: astral-sh/setup-uv@v3
with:
enable-cache: true
version: 0.9.5
- run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV
- uses: actions/cache@v4
with:
key: mkdocs-material-${{ env.cache_id }}
path: .cache
restore-keys: |
mkdocs-material-
- run: uv sync --frozen --group docs
- run: uv run --frozen --no-sync mkdocs gh-deploy --force
```
## /.github/workflows/publish-pypi.yml
```yml path="/.github/workflows/publish-pypi.yml"
name: Publishing
on:
release:
types: [published]
jobs:
release-build:
name: Build distribution
runs-on: ubuntu-latest
needs: [checks]
steps:
- uses: actions/checkout@v4
- name: Install uv
uses: astral-sh/setup-uv@v3
with:
enable-cache: true
version: 0.9.5
- name: Set up Python 3.12
run: uv python install 3.12
- name: Build
run: uv build
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: release-dists
path: dist/
checks:
uses: ./.github/workflows/shared.yml
pypi-publish:
name: Upload release to PyPI
runs-on: ubuntu-latest
environment: release
needs:
- release-build
permissions:
id-token: write # IMPORTANT: this permission is mandatory for trusted publishing
steps:
- name: Retrieve release distributions
uses: actions/download-artifact@v4
with:
name: release-dists
path: dist/
- name: Publish package distributions to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
docs-publish:
runs-on: ubuntu-latest
needs: ["pypi-publish"]
permissions:
contents: write
steps:
- uses: actions/checkout@v4
- name: Configure Git Credentials
run: |
git config user.name github-actions[bot]
git config user.email 41898282+github-actions[bot]@users.noreply.github.com
- name: Install uv
uses: astral-sh/setup-uv@v3
with:
enable-cache: true
version: 0.9.5
- run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV
- uses: actions/cache@v4
with:
key: mkdocs-material-${{ env.cache_id }}
path: .cache
restore-keys: |
mkdocs-material-
- run: uv sync --frozen --group docs
- run: uv run --frozen --no-sync mkdocs gh-deploy --force
```
## /.github/workflows/pull-request-checks.yml
```yml path="/.github/workflows/pull-request-checks.yml"
name: Pull request checks
on:
pull_request:
jobs:
checks:
uses: ./.github/workflows/shared.yml
```
## /.github/workflows/shared.yml
```yml path="/.github/workflows/shared.yml"
name: Shared Checks
on:
workflow_call:
permissions:
contents: read
env:
COLUMNS: 150
jobs:
pre-commit:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: astral-sh/setup-uv@v7
with:
enable-cache: true
version: 0.9.5
- name: Install dependencies
run: uv sync --frozen --all-extras --python 3.10
- uses: pre-commit/action@v3.0.1
with:
extra_args: --all-files --verbose
env:
SKIP: no-commit-to-branch
test:
name: test (${{ matrix.python-version }}, ${{ matrix.dep-resolution.name }}, ${{ matrix.os }})
runs-on: ${{ matrix.os }}
timeout-minutes: 10
continue-on-error: true
strategy:
matrix:
python-version: ["3.10", "3.11", "3.12", "3.13"]
dep-resolution:
- name: lowest-direct
install-flags: "--resolution lowest-direct"
- name: highest
install-flags: "--frozen"
os: [ubuntu-latest, windows-latest]
steps:
- uses: actions/checkout@v5
- name: Install uv
uses: astral-sh/setup-uv@v7
with:
enable-cache: true
version: 0.9.5
- name: Install the project
run: uv sync ${{ matrix.dep-resolution.install-flags }} --all-extras --python ${{ matrix.python-version }}
- name: Run pytest
run: uv run ${{ matrix.dep-resolution.install-flags }} --no-sync pytest
env:
UV_RESOLUTION: ${{ matrix.dep-resolution.name == 'lowest-direct' && 'lowest-direct' || 'highest' }}
readme-snippets:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: astral-sh/setup-uv@v7
with:
enable-cache: true
version: 0.9.5
- name: Install dependencies
run: uv sync --frozen --all-extras --python 3.10
- name: Check README snippets are up to date
run: uv run --frozen scripts/update_readme_snippets.py --check
```
## /.gitignore
```gitignore path="/.gitignore"
.DS_Store
scratch/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
.ruff_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
.idea/
# vscode
.vscode/
.windsurfrules
**/CLAUDE.local.md
# claude code
.claude/
```
## /.pre-commit-config.yaml
```yaml path="/.pre-commit-config.yaml"
fail_fast: true
repos:
- repo: https://github.com/pre-commit/mirrors-prettier
rev: v3.1.0
hooks:
- id: prettier
types_or: [yaml, json5]
- repo: https://github.com/igorshubovych/markdownlint-cli
rev: v0.45.0
hooks:
- id: markdownlint
args:
[
"--fix",
"--config",
"pyproject.toml",
"--configPointer",
"/tool/markdown/lint",
]
types: [markdown]
- repo: local
hooks:
- id: ruff-format
name: Ruff Format
entry: uv run --frozen ruff
args: [format]
language: system
types: [python]
pass_filenames: false
- id: ruff
name: Ruff
entry: uv run --frozen ruff
args: ["check", "--fix", "--exit-non-zero-on-fix"]
types: [python]
language: system
pass_filenames: false
exclude: ^README\.md$
- id: pyright
name: pyright
entry: uv run --frozen pyright
language: system
types: [python]
pass_filenames: false
- id: uv-lock-check
name: Check uv.lock is up to date
entry: uv lock --check
language: system
files: ^(pyproject\.toml|uv\.lock)$
pass_filenames: false
- id: readme-snippets
name: Check README snippets are up to date
entry: uv run --frozen python scripts/update_readme_snippets.py --check
language: system
files: ^(README\.md|examples/.*\.py|scripts/update_readme_snippets\.py)$
pass_filenames: false
```
## /CLAUDE.md
# Development Guidelines
This document contains critical information about working with this codebase. Follow these guidelines precisely.
## Core Development Rules
1. Package Management
- ONLY use uv, NEVER pip
- Installation: `uv add package`
- Running tools: `uv run tool`
- Upgrading: `uv add --dev package --upgrade-package package`
- FORBIDDEN: `uv pip install`, `@latest` syntax
2. Code Quality
- Type hints required for all code
- Public APIs must have docstrings
- Functions must be focused and small
- Follow existing patterns exactly
- Line length: 120 chars maximum
3. Testing Requirements
- Framework: `uv run --frozen pytest`
- Async testing: use anyio, not asyncio
- Coverage: test edge cases and errors
- New features require tests
- Bug fixes require regression tests
- For commits fixing bugs or adding features based on user reports add:
```bash
git commit --trailer "Reported-by:<name>"
```
Where `<name>` is the name of the user.
- For commits related to a GitHub issue, add
```bash
git commit --trailer "Github-Issue:#<number>"
```
- NEVER ever mention a `co-authored-by` or similar aspects. In particular, never
mention the tool used to create the commit message or PR.
## Pull Requests
- Create a detailed message of what changed. Focus on the high level description of
the problem it tries to solve, and how it is solved. Don't go into the specifics of the
code unless it adds clarity.
- NEVER ever mention a `co-authored-by` or similar aspects. In particular, never
mention the tool used to create the commit message or PR.
## Python Tools
## Code Formatting
1. Ruff
- Format: `uv run --frozen ruff format .`
- Check: `uv run --frozen ruff check .`
- Fix: `uv run --frozen ruff check . --fix`
- Critical issues:
- Line length (120 chars)
- Import sorting (I001)
- Unused imports
- Line wrapping:
- Strings: use parentheses
- Function calls: multi-line with proper indent
- Imports: split into multiple lines
2. Type Checking
- Tool: `uv run --frozen pyright`
- Requirements:
- Explicit None checks for Optional
- Type narrowing for strings
- Version warnings can be ignored if checks pass
3. Pre-commit
- Config: `.pre-commit-config.yaml`
- Runs: on git commit
- Tools: Prettier (YAML/JSON), markdownlint (Markdown), Ruff format/check (Python), pyright (Python)
- Ruff updates:
- Check PyPI versions
- Update config rev
- Commit config first
## Error Resolution
1. CI Failures
- Fix order:
1. Formatting
2. Type errors
3. Linting
- Type errors:
- Get full line context
- Check Optional types
- Add type narrowing
- Verify function signatures
2. Common Issues
- Line length:
- Break strings with parentheses
- Multi-line function calls
- Split imports
- Types:
- Add None checks
- Narrow string types
- Match existing patterns
- Pytest:
- If the tests aren't finding the anyio pytest mark, try adding PYTEST_DISABLE_PLUGIN_AUTOLOAD=""
to the start of the pytest run command eg:
`PYTEST_DISABLE_PLUGIN_AUTOLOAD="" uv run --frozen pytest`
3. Best Practices
- Check git status before commits
- Run formatters before type checks
- Keep changes minimal
- Follow existing patterns
- Document public APIs
- Test thoroughly
## Exception Handling
- **Always use `logger.exception()` instead of `logger.error()` when catching exceptions**
- Don't include the exception in the message: `logger.exception("Failed")` not `logger.exception(f"Failed: {e}")`
- **Catch specific exceptions** where possible:
- File ops: `except (OSError, PermissionError):`
- JSON: `except json.JSONDecodeError:`
- Network: `except (ConnectionError, TimeoutError):`
- **Only catch `Exception` for**:
- Top-level handlers that must not crash
- Cleanup blocks (log at debug level)
## /CODE_OF_CONDUCT.md
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
<mcp-coc@anthropic.com>.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
<https://www.contributor-covenant.org/version/2/0/code_of_conduct.html>.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
<https://www.contributor-covenant.org/faq>. Translations are available at
<https://www.contributor-covenant.org/translations>.
## /CONTRIBUTING.md
# Contributing
Thank you for your interest in contributing to the MCP Python SDK! This document provides guidelines and instructions for contributing.
## Development Setup
1. Make sure you have Python 3.10+ installed
2. Install [uv](https://docs.astral.sh/uv/getting-started/installation/)
3. Fork the repository
4. Clone your fork: `git clone https://github.com/YOUR-USERNAME/python-sdk.git`
5. Install dependencies:
```bash
uv sync --frozen --all-extras --dev
```
6. Set up pre-commit hooks:
```bash
uv tool install pre-commit --with pre-commit-uv --force-reinstall
```
## Development Workflow
1. Choose the correct branch for your changes:
- For bug fixes to a released version: use the latest release branch (e.g. v1.1.x for 1.1.3)
- For new features: use the main branch (which will become the next minor/major version)
- If unsure, ask in an issue first
2. Create a new branch from your chosen base branch
3. Make your changes
4. Ensure tests pass:
```bash
uv run pytest
```
5. Run type checking:
```bash
uv run pyright
```
6. Run linting:
```bash
uv run ruff check .
uv run ruff format .
```
7. Update README snippets if you modified example code:
```bash
uv run scripts/update_readme_snippets.py
```
8. (Optional) Run pre-commit hooks on all files:
```bash
pre-commit run --all-files
```
9. Submit a pull request to the same branch you branched from
## Code Style
- We use `ruff` for linting and formatting
- Follow PEP 8 style guidelines
- Add type hints to all functions
- Include docstrings for public APIs
## Pull Request Process
1. Update documentation as needed
2. Add tests for new functionality
3. Ensure CI passes
4. Maintainers will review your code
5. Address review feedback
## Code of Conduct
Please note that this project is released with a [Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.
## License
By contributing, you agree that your contributions will be licensed under the MIT License.
## /README.md
# MCP Python SDK
<div align="center">
<strong>Python implementation of the Model Context Protocol (MCP)</strong>
[![PyPI][pypi-badge]][pypi-url]
[![MIT licensed][mit-badge]][mit-url]
[![Python Version][python-badge]][python-url]
[![Documentation][docs-badge]][docs-url]
[![Protocol][protocol-badge]][protocol-url]
[![Specification][spec-badge]][spec-url]
</div>
<!-- omit in toc -->
## Table of Contents
- [MCP Python SDK](#mcp-python-sdk)
- [Overview](#overview)
- [Installation](#installation)
- [Adding MCP to your Python project](#adding-mcp-to-your-python-project)
- [Running the standalone MCP development tools](#running-the-standalone-mcp-development-tools)
- [Quickstart](#quickstart)
- [What is MCP?](#what-is-mcp)
- [Core Concepts](#core-concepts)
- [Server](#server)
- [Resources](#resources)
- [Tools](#tools)
- [Structured Output](#structured-output)
- [Prompts](#prompts)
- [Images](#images)
- [Context](#context)
- [Getting Context in Functions](#getting-context-in-functions)
- [Context Properties and Methods](#context-properties-and-methods)
- [Completions](#completions)
- [Elicitation](#elicitation)
- [Sampling](#sampling)
- [Logging and Notifications](#logging-and-notifications)
- [Authentication](#authentication)
- [FastMCP Properties](#fastmcp-properties)
- [Session Properties and Methods](#session-properties-and-methods)
- [Request Context Properties](#request-context-properties)
- [Running Your Server](#running-your-server)
- [Development Mode](#development-mode)
- [Claude Desktop Integration](#claude-desktop-integration)
- [Direct Execution](#direct-execution)
- [Streamable HTTP Transport](#streamable-http-transport)
- [CORS Configuration for Browser-Based Clients](#cors-configuration-for-browser-based-clients)
- [Mounting to an Existing ASGI Server](#mounting-to-an-existing-asgi-server)
- [StreamableHTTP servers](#streamablehttp-servers)
- [Basic mounting](#basic-mounting)
- [Host-based routing](#host-based-routing)
- [Multiple servers with path configuration](#multiple-servers-with-path-configuration)
- [Path configuration at initialization](#path-configuration-at-initialization)
- [SSE servers](#sse-servers)
- [Advanced Usage](#advanced-usage)
- [Low-Level Server](#low-level-server)
- [Structured Output Support](#structured-output-support)
- [Pagination (Advanced)](#pagination-advanced)
- [Writing MCP Clients](#writing-mcp-clients)
- [Client Display Utilities](#client-display-utilities)
- [OAuth Authentication for Clients](#oauth-authentication-for-clients)
- [Parsing Tool Results](#parsing-tool-results)
- [MCP Primitives](#mcp-primitives)
- [Server Capabilities](#server-capabilities)
- [Documentation](#documentation)
- [Contributing](#contributing)
- [License](#license)
[pypi-badge]: https://img.shields.io/pypi/v/mcp.svg
[pypi-url]: https://pypi.org/project/mcp/
[mit-badge]: https://img.shields.io/pypi/l/mcp.svg
[mit-url]: https://github.com/modelcontextprotocol/python-sdk/blob/main/LICENSE
[python-badge]: https://img.shields.io/pypi/pyversions/mcp.svg
[python-url]: https://www.python.org/downloads/
[docs-badge]: https://img.shields.io/badge/docs-python--sdk-blue.svg
[docs-url]: https://modelcontextprotocol.github.io/python-sdk/
[protocol-badge]: https://img.shields.io/badge/protocol-modelcontextprotocol.io-blue.svg
[protocol-url]: https://modelcontextprotocol.io
[spec-badge]: https://img.shields.io/badge/spec-spec.modelcontextprotocol.io-blue.svg
[spec-url]: https://modelcontextprotocol.io/specification/latest
## Overview
The Model Context Protocol allows applications to provide context for LLMs in a standardized way, separating the concerns of providing context from the actual LLM interaction. This Python SDK implements the full MCP specification, making it easy to:
- Build MCP clients that can connect to any MCP server
- Create MCP servers that expose resources, prompts and tools
- Use standard transports like stdio, SSE, and Streamable HTTP
- Handle all MCP protocol messages and lifecycle events
## Installation
### Adding MCP to your Python project
We recommend using [uv](https://docs.astral.sh/uv/) to manage your Python projects.
If you haven't created a uv-managed project yet, create one:
```bash
uv init mcp-server-demo
cd mcp-server-demo
```
Then add MCP to your project dependencies:
```bash
uv add "mcp[cli]"
```
Alternatively, for projects using pip for dependencies:
```bash
pip install "mcp[cli]"
```
### Running the standalone MCP development tools
To run the mcp command with uv:
```bash
uv run mcp
```
## Quickstart
Let's create a simple MCP server that exposes a calculator tool and some data:
<!-- snippet-source examples/snippets/servers/fastmcp_quickstart.py -->
```python
"""
FastMCP quickstart example.
cd to the `examples/snippets/clients` directory and run:
uv run server fastmcp_quickstart stdio
"""
from mcp.server.fastmcp import FastMCP
# Create an MCP server
mcp = FastMCP("Demo")
# Add an addition tool
@mcp.tool()
def add(a: int, b: int) -> int:
"""Add two numbers"""
return a + b
# Add a dynamic greeting resource
@mcp.resource("greeting://{name}")
def get_greeting(name: str) -> str:
"""Get a personalized greeting"""
return f"Hello, {name}!"
# Add a prompt
@mcp.prompt()
def greet_user(name: str, style: str = "friendly") -> str:
"""Generate a greeting prompt"""
styles = {
"friendly": "Please write a warm, friendly greeting",
"formal": "Please write a formal, professional greeting",
"casual": "Please write a casual, relaxed greeting",
}
return f"{styles.get(style, styles['friendly'])} for someone named {name}."
```
_Full example: [examples/snippets/servers/fastmcp_quickstart.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/fastmcp_quickstart.py)_
<!-- /snippet-source -->
You can install this server in [Claude Desktop](https://claude.ai/download) and interact with it right away by running:
```bash
uv run mcp install server.py
```
Alternatively, you can test it with the MCP Inspector:
```bash
uv run mcp dev server.py
```
## What is MCP?
The [Model Context Protocol (MCP)](https://modelcontextprotocol.io) lets you build servers that expose data and functionality to LLM applications in a secure, standardized way. Think of it like a web API, but specifically designed for LLM interactions. MCP servers can:
- Expose data through **Resources** (think of these sort of like GET endpoints; they are used to load information into the LLM's context)
- Provide functionality through **Tools** (sort of like POST endpoints; they are used to execute code or otherwise produce a side effect)
- Define interaction patterns through **Prompts** (reusable templates for LLM interactions)
- And more!
## Core Concepts
### Server
The FastMCP server is your core interface to the MCP protocol. It handles connection management, protocol compliance, and message routing:
<!-- snippet-source examples/snippets/servers/lifespan_example.py -->
```python
"""Example showing lifespan support for startup/shutdown with strong typing."""
from collections.abc import AsyncIterator
from contextlib import asynccontextmanager
from dataclasses import dataclass
from mcp.server.fastmcp import Context, FastMCP
from mcp.server.session import ServerSession
# Mock database class for example
class Database:
"""Mock database class for example."""
@classmethod
async def connect(cls) -> "Database":
"""Connect to database."""
return cls()
async def disconnect(self) -> None:
"""Disconnect from database."""
pass
def query(self) -> str:
"""Execute a query."""
return "Query result"
@dataclass
class AppContext:
"""Application context with typed dependencies."""
db: Database
@asynccontextmanager
async def app_lifespan(server: FastMCP) -> AsyncIterator[AppContext]:
"""Manage application lifecycle with type-safe context."""
# Initialize on startup
db = await Database.connect()
try:
yield AppContext(db=db)
finally:
# Cleanup on shutdown
await db.disconnect()
# Pass lifespan to server
mcp = FastMCP("My App", lifespan=app_lifespan)
# Access type-safe lifespan context in tools
@mcp.tool()
def query_db(ctx: Context[ServerSession, AppContext]) -> str:
"""Tool that uses initialized resources."""
db = ctx.request_context.lifespan_context.db
return db.query()
```
_Full example: [examples/snippets/servers/lifespan_example.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/lifespan_example.py)_
<!-- /snippet-source -->
### Resources
Resources are how you expose data to LLMs. They're similar to GET endpoints in a REST API - they provide data but shouldn't perform significant computation or have side effects:
<!-- snippet-source examples/snippets/servers/basic_resource.py -->
```python
from mcp.server.fastmcp import FastMCP
mcp = FastMCP(name="Resource Example")
@mcp.resource("file://documents/{name}")
def read_document(name: str) -> str:
"""Read a document by name."""
# This would normally read from disk
return f"Content of {name}"
@mcp.resource("config://settings")
def get_settings() -> str:
"""Get application settings."""
return """{
"theme": "dark",
"language": "en",
"debug": false
}"""
```
_Full example: [examples/snippets/servers/basic_resource.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/basic_resource.py)_
<!-- /snippet-source -->
### Tools
Tools let LLMs take actions through your server. Unlike resources, tools are expected to perform computation and have side effects:
<!-- snippet-source examples/snippets/servers/basic_tool.py -->
```python
from mcp.server.fastmcp import FastMCP
mcp = FastMCP(name="Tool Example")
@mcp.tool()
def sum(a: int, b: int) -> int:
"""Add two numbers together."""
return a + b
@mcp.tool()
def get_weather(city: str, unit: str = "celsius") -> str:
"""Get weather for a city."""
# This would normally call a weather API
    return f"Weather in {city}: 22°{unit[0].upper()}"
```
_Full example: [examples/snippets/servers/basic_tool.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/basic_tool.py)_
<!-- /snippet-source -->
Tools can optionally receive a Context object by including a parameter with the `Context` type annotation. This context is automatically injected by the FastMCP framework and provides access to MCP capabilities:
<!-- snippet-source examples/snippets/servers/tool_progress.py -->
```python
from mcp.server.fastmcp import Context, FastMCP
from mcp.server.session import ServerSession
mcp = FastMCP(name="Progress Example")
@mcp.tool()
async def long_running_task(task_name: str, ctx: Context[ServerSession, None], steps: int = 5) -> str:
"""Execute a task with progress updates."""
await ctx.info(f"Starting: {task_name}")
for i in range(steps):
progress = (i + 1) / steps
await ctx.report_progress(
progress=progress,
total=1.0,
message=f"Step {i + 1}/{steps}",
)
await ctx.debug(f"Completed step {i + 1}")
return f"Task '{task_name}' completed"
```
_Full example: [examples/snippets/servers/tool_progress.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/tool_progress.py)_
<!-- /snippet-source -->
#### Structured Output
Tools will return structured results by default, if their return type
annotation is compatible. Otherwise, they will return unstructured results.
Structured output supports these return types:
- Pydantic models (BaseModel subclasses)
- TypedDicts
- Dataclasses and other classes with type hints
- `dict[str, T]` (where T is any JSON-serializable type)
- Primitive types (str, int, float, bool, bytes, None) - wrapped in `{"result": value}`
- Generic types (list, tuple, Union, Optional, etc.) - wrapped in `{"result": value}`
Classes without type hints cannot be serialized for structured output. Only
classes with properly annotated attributes will be converted to Pydantic models
for schema generation and validation.
Structured results are automatically validated against the output schema
generated from the annotation. This ensures the tool returns well-typed,
validated data that clients can easily process.
**Note:** For backward compatibility, unstructured results are also
returned. Unstructured results are provided for backward compatibility
with previous versions of the MCP specification, and are quirks-compatible
with previous versions of FastMCP in the current version of the SDK.
**Note:** In cases where a tool function's return type annotation
causes the tool to be classified as structured _and this is undesirable_,
the classification can be suppressed by passing `structured_output=False`
to the `@tool` decorator.
##### Advanced: Direct CallToolResult
For full control over tool responses including the `_meta` field (for passing data to client applications without exposing it to the model), you can return `CallToolResult` directly:
<!-- snippet-source examples/snippets/servers/direct_call_tool_result.py -->
```python
"""Example showing direct CallToolResult return for advanced control."""
from typing import Annotated
from pydantic import BaseModel
from mcp.server.fastmcp import FastMCP
from mcp.types import CallToolResult, TextContent
mcp = FastMCP("CallToolResult Example")
class ValidationModel(BaseModel):
"""Model for validating structured output."""
status: str
data: dict[str, int]
@mcp.tool()
def advanced_tool() -> CallToolResult:
"""Return CallToolResult directly for full control including _meta field."""
return CallToolResult(
content=[TextContent(type="text", text="Response visible to the model")],
_meta={"hidden": "data for client applications only"},
)
@mcp.tool()
def validated_tool() -> Annotated[CallToolResult, ValidationModel]:
"""Return CallToolResult with structured output validation."""
return CallToolResult(
content=[TextContent(type="text", text="Validated response")],
structuredContent={"status": "success", "data": {"result": 42}},
_meta={"internal": "metadata"},
)
@mcp.tool()
def empty_result_tool() -> CallToolResult:
"""For empty results, return CallToolResult with empty content."""
return CallToolResult(content=[])
```
_Full example: [examples/snippets/servers/direct_call_tool_result.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/direct_call_tool_result.py)_
<!-- /snippet-source -->
**Important:** `CallToolResult` must always be returned (no `Optional` or `Union`). For empty results, use `CallToolResult(content=[])`. For optional simple types, use `str | None` without `CallToolResult`.
<!-- snippet-source examples/snippets/servers/structured_output.py -->
```python
"""Example showing structured output with tools."""
from typing import TypedDict
from pydantic import BaseModel, Field
from mcp.server.fastmcp import FastMCP
mcp = FastMCP("Structured Output Example")
# Using Pydantic models for rich structured data
class WeatherData(BaseModel):
"""Weather information structure."""
temperature: float = Field(description="Temperature in Celsius")
humidity: float = Field(description="Humidity percentage")
condition: str
wind_speed: float
@mcp.tool()
def get_weather(city: str) -> WeatherData:
"""Get weather for a city - returns structured data."""
# Simulated weather data
return WeatherData(
temperature=22.5,
humidity=45.0,
condition="sunny",
wind_speed=5.2,
)
# Using TypedDict for simpler structures
class LocationInfo(TypedDict):
latitude: float
longitude: float
name: str
@mcp.tool()
def get_location(address: str) -> LocationInfo:
"""Get location coordinates"""
return LocationInfo(latitude=51.5074, longitude=-0.1278, name="London, UK")
# Using dict[str, Any] for flexible schemas
@mcp.tool()
def get_statistics(data_type: str) -> dict[str, float]:
"""Get various statistics"""
return {"mean": 42.5, "median": 40.0, "std_dev": 5.2}
# Ordinary classes with type hints work for structured output
class UserProfile:
name: str
age: int
email: str | None = None
def __init__(self, name: str, age: int, email: str | None = None):
self.name = name
self.age = age
self.email = email
@mcp.tool()
def get_user(user_id: str) -> UserProfile:
"""Get user profile - returns structured data"""
return UserProfile(name="Alice", age=30, email="alice@example.com")
# Classes WITHOUT type hints cannot be used for structured output
class UntypedConfig:
def __init__(self, setting1, setting2): # type: ignore[reportMissingParameterType]
self.setting1 = setting1
self.setting2 = setting2
@mcp.tool()
def get_config() -> UntypedConfig:
"""This returns unstructured output - no schema generated"""
return UntypedConfig("value1", "value2")
# Lists and other types are wrapped automatically
@mcp.tool()
def list_cities() -> list[str]:
"""Get a list of cities"""
return ["London", "Paris", "Tokyo"]
# Returns: {"result": ["London", "Paris", "Tokyo"]}
@mcp.tool()
def get_temperature(city: str) -> float:
"""Get temperature as a simple float"""
return 22.5
# Returns: {"result": 22.5}
```
_Full example: [examples/snippets/servers/structured_output.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/structured_output.py)_
<!-- /snippet-source -->
### Prompts
Prompts are reusable templates that help LLMs interact with your server effectively:
<!-- snippet-source examples/snippets/servers/basic_prompt.py -->
```python
from mcp.server.fastmcp import FastMCP
from mcp.server.fastmcp.prompts import base
mcp = FastMCP(name="Prompt Example")
@mcp.prompt(title="Code Review")
def review_code(code: str) -> str:
return f"Please review this code:\n\n{code}"
@mcp.prompt(title="Debug Assistant")
def debug_error(error: str) -> list[base.Message]:
return [
base.UserMessage("I'm seeing this error:"),
base.UserMessage(error),
base.AssistantMessage("I'll help debug that. What have you tried so far?"),
]
```
_Full example: [examples/snippets/servers/basic_prompt.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/basic_prompt.py)_
<!-- /snippet-source -->
### Icons
MCP servers can provide icons for UI display. Icons can be added to the server implementation, tools, resources, and prompts:
```python
from mcp.server.fastmcp import FastMCP, Icon
# Create an icon from a file path or URL
icon = Icon(
src="icon.png",
mimeType="image/png",
sizes="64x64"
)
# Add icons to server
mcp = FastMCP(
"My Server",
website_url="https://example.com",
icons=[icon]
)
# Add icons to tools, resources, and prompts
@mcp.tool(icons=[icon])
def my_tool():
"""Tool with an icon."""
return "result"
@mcp.resource("demo://resource", icons=[icon])
def my_resource():
"""Resource with an icon."""
return "content"
```
_Full example: [examples/fastmcp/icons_demo.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/fastmcp/icons_demo.py)_
### Images
FastMCP provides an `Image` class that automatically handles image data:
<!-- snippet-source examples/snippets/servers/images.py -->
```python
"""Example showing image handling with FastMCP."""
from PIL import Image as PILImage
from mcp.server.fastmcp import FastMCP, Image
mcp = FastMCP("Image Example")
@mcp.tool()
def create_thumbnail(image_path: str) -> Image:
"""Create a thumbnail from an image"""
img = PILImage.open(image_path)
img.thumbnail((100, 100))
return Image(data=img.tobytes(), format="png")
```
_Full example: [examples/snippets/servers/images.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/images.py)_
<!-- /snippet-source -->
### Context
The Context object is automatically injected into tool and resource functions that request it via type hints. It provides access to MCP capabilities like logging, progress reporting, resource reading, user interaction, and request metadata.
#### Getting Context in Functions
To use context in a tool or resource function, add a parameter with the `Context` type annotation:
```python
from mcp.server.fastmcp import Context, FastMCP
mcp = FastMCP(name="Context Example")
@mcp.tool()
async def my_tool(x: int, ctx: Context) -> str:
"""Tool that uses context capabilities."""
# The context parameter can have any name as long as it's type-annotated
return await process_with_context(x, ctx)
```
#### Context Properties and Methods
The Context object provides the following capabilities:
- `ctx.request_id` - Unique ID for the current request
- `ctx.client_id` - Client ID if available
- `ctx.fastmcp` - Access to the FastMCP server instance (see [FastMCP Properties](#fastmcp-properties))
- `ctx.session` - Access to the underlying session for advanced communication (see [Session Properties and Methods](#session-properties-and-methods))
- `ctx.request_context` - Access to request-specific data and lifespan resources (see [Request Context Properties](#request-context-properties))
- `await ctx.debug(message)` - Send debug log message
- `await ctx.info(message)` - Send info log message
- `await ctx.warning(message)` - Send warning log message
- `await ctx.error(message)` - Send error log message
- `await ctx.log(level, message, logger_name=None)` - Send log with custom level
- `await ctx.report_progress(progress, total=None, message=None)` - Report operation progress
- `await ctx.read_resource(uri)` - Read a resource by URI
- `await ctx.elicit(message, schema)` - Request additional information from user with validation
<!-- snippet-source examples/snippets/servers/tool_progress.py -->
```python
from mcp.server.fastmcp import Context, FastMCP
from mcp.server.session import ServerSession
mcp = FastMCP(name="Progress Example")
@mcp.tool()
async def long_running_task(task_name: str, ctx: Context[ServerSession, None], steps: int = 5) -> str:
"""Execute a task with progress updates."""
await ctx.info(f"Starting: {task_name}")
for i in range(steps):
progress = (i + 1) / steps
await ctx.report_progress(
progress=progress,
total=1.0,
message=f"Step {i + 1}/{steps}",
)
await ctx.debug(f"Completed step {i + 1}")
return f"Task '{task_name}' completed"
```
_Full example: [examples/snippets/servers/tool_progress.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/tool_progress.py)_
<!-- /snippet-source -->
### Completions
MCP supports providing completion suggestions for prompt arguments and resource template parameters. With the context parameter, servers can provide completions based on previously resolved values:
Client usage:
<!-- snippet-source examples/snippets/clients/completion_client.py -->
```python
"""
cd to the `examples/snippets` directory and run:
uv run completion-client
"""
import asyncio
import os
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from mcp.types import PromptReference, ResourceTemplateReference
# Create server parameters for stdio connection
server_params = StdioServerParameters(
command="uv", # Using uv to run the server
args=["run", "server", "completion", "stdio"], # Server with completion support
env={"UV_INDEX": os.environ.get("UV_INDEX", "")},
)
async def run():
"""Run the completion client example."""
async with stdio_client(server_params) as (read, write):
async with ClientSession(read, write) as session:
# Initialize the connection
await session.initialize()
# List available resource templates
templates = await session.list_resource_templates()
print("Available resource templates:")
for template in templates.resourceTemplates:
print(f" - {template.uriTemplate}")
# List available prompts
prompts = await session.list_prompts()
print("\nAvailable prompts:")
for prompt in prompts.prompts:
print(f" - {prompt.name}")
# Complete resource template arguments
if templates.resourceTemplates:
template = templates.resourceTemplates[0]
print(f"\nCompleting arguments for resource template: {template.uriTemplate}")
# Complete without context
result = await session.complete(
ref=ResourceTemplateReference(type="ref/resource", uri=template.uriTemplate),
argument={"name": "owner", "value": "model"},
)
print(f"Completions for 'owner' starting with 'model': {result.completion.values}")
# Complete with context - repo suggestions based on owner
result = await session.complete(
ref=ResourceTemplateReference(type="ref/resource", uri=template.uriTemplate),
argument={"name": "repo", "value": ""},
context_arguments={"owner": "modelcontextprotocol"},
)
print(f"Completions for 'repo' with owner='modelcontextprotocol': {result.completion.values}")
# Complete prompt arguments
if prompts.prompts:
prompt_name = prompts.prompts[0].name
print(f"\nCompleting arguments for prompt: {prompt_name}")
result = await session.complete(
ref=PromptReference(type="ref/prompt", name=prompt_name),
argument={"name": "style", "value": ""},
)
print(f"Completions for 'style' argument: {result.completion.values}")
def main():
"""Entry point for the completion client."""
asyncio.run(run())
if __name__ == "__main__":
main()
```
_Full example: [examples/snippets/clients/completion_client.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/clients/completion_client.py)_
<!-- /snippet-source -->
### Elicitation
Request additional information from users. This example shows an Elicitation during a Tool Call:
<!-- snippet-source examples/snippets/servers/elicitation.py -->
```python
from pydantic import BaseModel, Field
from mcp.server.fastmcp import Context, FastMCP
from mcp.server.session import ServerSession
mcp = FastMCP(name="Elicitation Example")
class BookingPreferences(BaseModel):
"""Schema for collecting user preferences."""
checkAlternative: bool = Field(description="Would you like to check another date?")
alternativeDate: str = Field(
default="2024-12-26",
description="Alternative date (YYYY-MM-DD)",
)
@mcp.tool()
async def book_table(date: str, time: str, party_size: int, ctx: Context[ServerSession, None]) -> str:
"""Book a table with date availability check."""
# Check if date is available
if date == "2024-12-25":
# Date unavailable - ask user for alternative
result = await ctx.elicit(
message=(f"No tables available for {party_size} on {date}. Would you like to try another date?"),
schema=BookingPreferences,
)
if result.action == "accept" and result.data:
if result.data.checkAlternative:
return f"[SUCCESS] Booked for {result.data.alternativeDate}"
return "[CANCELLED] No booking made"
return "[CANCELLED] Booking cancelled"
# Date available
return f"[SUCCESS] Booked for {date} at {time}"
```
_Full example: [examples/snippets/servers/elicitation.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/elicitation.py)_
<!-- /snippet-source -->
Elicitation schemas support default values for all field types. Default values are automatically included in the JSON schema sent to clients, allowing them to pre-populate forms.
The `elicit()` method returns an `ElicitationResult` with:
- `action`: "accept", "decline", or "cancel"
- `data`: The validated response (only when accepted)
- `validation_error`: Any validation error message
### Sampling
Tools can interact with LLMs through sampling (generating text):
<!-- snippet-source examples/snippets/servers/sampling.py -->
```python
from mcp.server.fastmcp import Context, FastMCP
from mcp.server.session import ServerSession
from mcp.types import SamplingMessage, TextContent
mcp = FastMCP(name="Sampling Example")
@mcp.tool()
async def generate_poem(topic: str, ctx: Context[ServerSession, None]) -> str:
"""Generate a poem using LLM sampling."""
prompt = f"Write a short poem about {topic}"
result = await ctx.session.create_message(
messages=[
SamplingMessage(
role="user",
content=TextContent(type="text", text=prompt),
)
],
max_tokens=100,
)
if result.content.type == "text":
return result.content.text
return str(result.content)
```
_Full example: [examples/snippets/servers/sampling.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/sampling.py)_
<!-- /snippet-source -->
### Logging and Notifications
Tools can send logs and notifications through the context:
<!-- snippet-source examples/snippets/servers/notifications.py -->
```python
from mcp.server.fastmcp import Context, FastMCP
from mcp.server.session import ServerSession
mcp = FastMCP(name="Notifications Example")
@mcp.tool()
async def process_data(data: str, ctx: Context[ServerSession, None]) -> str:
"""Process data with logging."""
# Different log levels
await ctx.debug(f"Debug: Processing '{data}'")
await ctx.info("Info: Starting processing")
await ctx.warning("Warning: This is experimental")
await ctx.error("Error: (This is just a demo)")
# Notify about resource changes
await ctx.session.send_resource_list_changed()
return f"Processed: {data}"
```
_Full example: [examples/snippets/servers/notifications.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/notifications.py)_
<!-- /snippet-source -->
### Authentication
Authentication can be used by servers that want to expose tools accessing protected resources.
`mcp.server.auth` implements OAuth 2.1 resource server functionality, where MCP servers act as Resource Servers (RS) that validate tokens issued by separate Authorization Servers (AS). This follows the [MCP authorization specification](https://modelcontextprotocol.io/specification/2025-06-18/basic/authorization) and implements RFC 9728 (Protected Resource Metadata) for AS discovery.
MCP servers can use authentication by providing an implementation of the `TokenVerifier` protocol:
<!-- snippet-source examples/snippets/servers/oauth_server.py -->
```python
"""
Run from the repository root:
uv run examples/snippets/servers/oauth_server.py
"""
from pydantic import AnyHttpUrl
from mcp.server.auth.provider import AccessToken, TokenVerifier
from mcp.server.auth.settings import AuthSettings
from mcp.server.fastmcp import FastMCP
class SimpleTokenVerifier(TokenVerifier):
"""Simple token verifier for demonstration."""
async def verify_token(self, token: str) -> AccessToken | None:
pass # This is where you would implement actual token validation
# Create FastMCP instance as a Resource Server
mcp = FastMCP(
"Weather Service",
# Token verifier for authentication
token_verifier=SimpleTokenVerifier(),
# Auth settings for RFC 9728 Protected Resource Metadata
auth=AuthSettings(
issuer_url=AnyHttpUrl("https://auth.example.com"), # Authorization Server URL
resource_server_url=AnyHttpUrl("http://localhost:3001"), # This server's URL
required_scopes=["user"],
),
)
@mcp.tool()
async def get_weather(city: str = "London") -> dict[str, str]:
"""Get weather data for a city"""
return {
"city": city,
"temperature": "22",
"condition": "Partly cloudy",
"humidity": "65%",
}
if __name__ == "__main__":
mcp.run(transport="streamable-http")
```
_Full example: [examples/snippets/servers/oauth_server.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/oauth_server.py)_
<!-- /snippet-source -->
For a complete example with separate Authorization Server and Resource Server implementations, see [`examples/servers/simple-auth/`](examples/servers/simple-auth/).
**Architecture:**
- **Authorization Server (AS)**: Handles OAuth flows, user authentication, and token issuance
- **Resource Server (RS)**: Your MCP server that validates tokens and serves protected resources
- **Client**: Discovers AS through RFC 9728, obtains tokens, and uses them with the MCP server
See [TokenVerifier](src/mcp/server/auth/provider.py) for more details on implementing token validation.
### FastMCP Properties
The FastMCP server instance accessible via `ctx.fastmcp` provides access to server configuration and metadata:
- `ctx.fastmcp.name` - The server's name as defined during initialization
- `ctx.fastmcp.instructions` - Server instructions/description provided to clients
- `ctx.fastmcp.website_url` - Optional website URL for the server
- `ctx.fastmcp.icons` - Optional list of icons for UI display
- `ctx.fastmcp.settings` - Complete server configuration object containing:
- `debug` - Debug mode flag
- `log_level` - Current logging level
- `host` and `port` - Server network configuration
- `mount_path`, `sse_path`, `streamable_http_path` - Transport paths
- `stateless_http` - Whether the server operates in stateless mode
- And other configuration options
```python
@mcp.tool()
def server_info(ctx: Context) -> dict:
"""Get information about the current server."""
return {
"name": ctx.fastmcp.name,
"instructions": ctx.fastmcp.instructions,
"debug_mode": ctx.fastmcp.settings.debug,
"log_level": ctx.fastmcp.settings.log_level,
"host": ctx.fastmcp.settings.host,
"port": ctx.fastmcp.settings.port,
}
```
### Session Properties and Methods
The session object accessible via `ctx.session` provides advanced control over client communication:
- `ctx.session.client_params` - Client initialization parameters and declared capabilities
- `await ctx.session.send_log_message(level, data, logger)` - Send log messages with full control
- `await ctx.session.create_message(messages, max_tokens)` - Request LLM sampling/completion
- `await ctx.session.send_progress_notification(token, progress, total, message)` - Direct progress updates
- `await ctx.session.send_resource_updated(uri)` - Notify clients that a specific resource changed
- `await ctx.session.send_resource_list_changed()` - Notify clients that the resource list changed
- `await ctx.session.send_tool_list_changed()` - Notify clients that the tool list changed
- `await ctx.session.send_prompt_list_changed()` - Notify clients that the prompt list changed
```python
@mcp.tool()
async def notify_data_update(resource_uri: str, ctx: Context) -> str:
"""Update data and notify clients of the change."""
# Perform data update logic here
# Notify clients that this specific resource changed
await ctx.session.send_resource_updated(AnyUrl(resource_uri))
# If this affects the overall resource list, notify about that too
await ctx.session.send_resource_list_changed()
return f"Updated {resource_uri} and notified clients"
```
### Request Context Properties
The request context accessible via `ctx.request_context` contains request-specific information and resources:
- `ctx.request_context.lifespan_context` - Access to resources initialized during server startup
- Database connections, configuration objects, shared services
- Type-safe access to resources defined in your server's lifespan function
- `ctx.request_context.meta` - Request metadata from the client including:
- `progressToken` - Token for progress notifications
- Other client-provided metadata
- `ctx.request_context.request` - The original MCP request object for advanced processing
- `ctx.request_context.request_id` - Unique identifier for this request
```python
# Example with typed lifespan context
@dataclass
class AppContext:
db: Database
config: AppConfig
@mcp.tool()
def query_with_config(query: str, ctx: Context) -> str:
"""Execute a query using shared database and configuration."""
# Access typed lifespan context
app_ctx: AppContext = ctx.request_context.lifespan_context
# Use shared resources
connection = app_ctx.db
settings = app_ctx.config
# Execute query with configuration
result = connection.execute(query, timeout=settings.query_timeout)
return str(result)
```
_Full lifespan example: [examples/snippets/servers/lifespan_example.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/lifespan_example.py)_
## Running Your Server
### Development Mode
The fastest way to test and debug your server is with the MCP Inspector:
```bash
uv run mcp dev server.py
# Add dependencies
uv run mcp dev server.py --with pandas --with numpy
# Mount local code
uv run mcp dev server.py --with-editable .
```
### Claude Desktop Integration
Once your server is ready, install it in Claude Desktop:
```bash
uv run mcp install server.py
# Custom name
uv run mcp install server.py --name "My Analytics Server"
# Environment variables
uv run mcp install server.py -v API_KEY=abc123 -v DB_URL=postgres://...
uv run mcp install server.py -f .env
```
### Direct Execution
For advanced scenarios like custom deployments:
<!-- snippet-source examples/snippets/servers/direct_execution.py -->
```python
"""Example showing direct execution of an MCP server.
This is the simplest way to run an MCP server directly.
cd to the `examples/snippets` directory and run:
uv run direct-execution-server
or
python servers/direct_execution.py
"""
from mcp.server.fastmcp import FastMCP
mcp = FastMCP("My App")
@mcp.tool()
def hello(name: str = "World") -> str:
"""Say hello to someone."""
return f"Hello, {name}!"
def main():
"""Entry point for the direct execution server."""
mcp.run()
if __name__ == "__main__":
main()
```
_Full example: [examples/snippets/servers/direct_execution.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/direct_execution.py)_
<!-- /snippet-source -->
Run it with:
```bash
python servers/direct_execution.py
# or
uv run mcp run servers/direct_execution.py
```
Note that `uv run mcp run` and `uv run mcp dev` only support servers using FastMCP, not the low-level server variant.
### Streamable HTTP Transport
> **Note**: Streamable HTTP transport is superseding SSE transport for production deployments.
<!-- snippet-source examples/snippets/servers/streamable_config.py -->
```python
"""
Run from the repository root:
uv run examples/snippets/servers/streamable_config.py
"""
from mcp.server.fastmcp import FastMCP
# Stateful server (maintains session state)
mcp = FastMCP("StatefulServer")
# Other configuration options:
# Stateless server (no session persistence)
# mcp = FastMCP("StatelessServer", stateless_http=True)
# Stateless server (no session persistence, no sse stream with supported client)
# mcp = FastMCP("StatelessServer", stateless_http=True, json_response=True)
# Add a simple tool to demonstrate the server
@mcp.tool()
def greet(name: str = "World") -> str:
"""Greet someone by name."""
return f"Hello, {name}!"
# Run server with streamable_http transport
if __name__ == "__main__":
mcp.run(transport="streamable-http")
```
_Full example: [examples/snippets/servers/streamable_config.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/streamable_config.py)_
<!-- /snippet-source -->
You can mount multiple FastMCP servers in a Starlette application:
<!-- snippet-source examples/snippets/servers/streamable_starlette_mount.py -->
```python
"""
Run from the repository root:
uvicorn examples.snippets.servers.streamable_starlette_mount:app --reload
"""
import contextlib
from starlette.applications import Starlette
from starlette.routing import Mount
from mcp.server.fastmcp import FastMCP
# Create the Echo server
echo_mcp = FastMCP(name="EchoServer", stateless_http=True)
@echo_mcp.tool()
def echo(message: str) -> str:
"""A simple echo tool"""
return f"Echo: {message}"
# Create the Math server
math_mcp = FastMCP(name="MathServer", stateless_http=True)
@math_mcp.tool()
def add_two(n: int) -> int:
"""Tool to add two to the input"""
return n + 2
# Create a combined lifespan to manage both session managers
@contextlib.asynccontextmanager
async def lifespan(app: Starlette):
async with contextlib.AsyncExitStack() as stack:
await stack.enter_async_context(echo_mcp.session_manager.run())
await stack.enter_async_context(math_mcp.session_manager.run())
yield
# Create the Starlette app and mount the MCP servers
app = Starlette(
routes=[
Mount("/echo", echo_mcp.streamable_http_app()),
Mount("/math", math_mcp.streamable_http_app()),
],
lifespan=lifespan,
)
# Note: Clients connect to http://localhost:8000/echo/mcp and http://localhost:8000/math/mcp
# To mount at the root of each path (e.g., /echo instead of /echo/mcp):
# echo_mcp.settings.streamable_http_path = "/"
# math_mcp.settings.streamable_http_path = "/"
```
_Full example: [examples/snippets/servers/streamable_starlette_mount.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/streamable_starlette_mount.py)_
<!-- /snippet-source -->
For low level server with Streamable HTTP implementations, see:
- Stateful server: [`examples/servers/simple-streamablehttp/`](examples/servers/simple-streamablehttp/)
- Stateless server: [`examples/servers/simple-streamablehttp-stateless/`](examples/servers/simple-streamablehttp-stateless/)
The streamable HTTP transport supports:
- Stateful and stateless operation modes
- Resumability with event stores
- JSON or SSE response formats
- Better scalability for multi-node deployments
#### CORS Configuration for Browser-Based Clients
If you'd like your server to be accessible by browser-based MCP clients, you'll need to configure CORS headers. The `Mcp-Session-Id` header must be exposed for browser clients to access it:
```python
from starlette.applications import Starlette
from starlette.middleware.cors import CORSMiddleware
# Create your Starlette app first
starlette_app = Starlette(routes=[...])
# Then wrap it with CORS middleware
starlette_app = CORSMiddleware(
starlette_app,
allow_origins=["*"], # Configure appropriately for production
allow_methods=["GET", "POST", "DELETE"], # MCP streamable HTTP methods
expose_headers=["Mcp-Session-Id"],
)
```
This configuration is necessary because:
- The MCP streamable HTTP transport uses the `Mcp-Session-Id` header for session management
- Browsers restrict access to response headers unless explicitly exposed via CORS
- Without this configuration, browser-based clients won't be able to read the session ID from initialization responses
### Mounting to an Existing ASGI Server
By default, SSE servers are mounted at `/sse` and Streamable HTTP servers are mounted at `/mcp`. You can customize these paths using the methods described below.
For more information on mounting applications in Starlette, see the [Starlette documentation](https://www.starlette.io/routing/#submounting-routes).
#### StreamableHTTP servers
You can mount the StreamableHTTP server to an existing ASGI server using the `streamable_http_app` method. This allows you to integrate the StreamableHTTP server with other ASGI applications.
##### Basic mounting
<!-- snippet-source examples/snippets/servers/streamable_http_basic_mounting.py -->
```python
"""
Basic example showing how to mount StreamableHTTP server in Starlette.
Run from the repository root:
uvicorn examples.snippets.servers.streamable_http_basic_mounting:app --reload
"""
from starlette.applications import Starlette
from starlette.routing import Mount
from mcp.server.fastmcp import FastMCP
# Create MCP server
mcp = FastMCP("My App")
@mcp.tool()
def hello() -> str:
"""A simple hello tool"""
return "Hello from MCP!"
# Mount the StreamableHTTP server to the existing ASGI server
app = Starlette(
routes=[
Mount("/", app=mcp.streamable_http_app()),
]
)
```
_Full example: [examples/snippets/servers/streamable_http_basic_mounting.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/streamable_http_basic_mounting.py)_
<!-- /snippet-source -->
##### Host-based routing
<!-- snippet-source examples/snippets/servers/streamable_http_host_mounting.py -->
```python
"""
Example showing how to mount StreamableHTTP server using Host-based routing.
Run from the repository root:
uvicorn examples.snippets.servers.streamable_http_host_mounting:app --reload
"""
from starlette.applications import Starlette
from starlette.routing import Host
from mcp.server.fastmcp import FastMCP
# Create MCP server
mcp = FastMCP("MCP Host App")
@mcp.tool()
def domain_info() -> str:
"""Get domain-specific information"""
return "This is served from mcp.acme.corp"
# Mount using Host-based routing
app = Starlette(
routes=[
Host("mcp.acme.corp", app=mcp.streamable_http_app()),
]
)
```
_Full example: [examples/snippets/servers/streamable_http_host_mounting.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/streamable_http_host_mounting.py)_
<!-- /snippet-source -->
##### Multiple servers with path configuration
<!-- snippet-source examples/snippets/servers/streamable_http_multiple_servers.py -->
```python
"""
Example showing how to mount multiple StreamableHTTP servers with path configuration.
Run from the repository root:
uvicorn examples.snippets.servers.streamable_http_multiple_servers:app --reload
"""
from starlette.applications import Starlette
from starlette.routing import Mount
from mcp.server.fastmcp import FastMCP
# Create multiple MCP servers
api_mcp = FastMCP("API Server")
chat_mcp = FastMCP("Chat Server")
@api_mcp.tool()
def api_status() -> str:
"""Get API status"""
return "API is running"
@chat_mcp.tool()
def send_message(message: str) -> str:
"""Send a chat message"""
return f"Message sent: {message}"
# Configure servers to mount at the root of each path
# This means endpoints will be at /api and /chat instead of /api/mcp and /chat/mcp
api_mcp.settings.streamable_http_path = "/"
chat_mcp.settings.streamable_http_path = "/"
# Mount the servers
app = Starlette(
routes=[
Mount("/api", app=api_mcp.streamable_http_app()),
Mount("/chat", app=chat_mcp.streamable_http_app()),
]
)
```
_Full example: [examples/snippets/servers/streamable_http_multiple_servers.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/streamable_http_multiple_servers.py)_
<!-- /snippet-source -->
##### Path configuration at initialization
<!-- snippet-source examples/snippets/servers/streamable_http_path_config.py -->
```python
"""
Example showing path configuration during FastMCP initialization.
Run from the repository root:
uvicorn examples.snippets.servers.streamable_http_path_config:app --reload
"""
from starlette.applications import Starlette
from starlette.routing import Mount
from mcp.server.fastmcp import FastMCP
# Configure streamable_http_path during initialization
# This server will mount at the root of wherever it's mounted
mcp_at_root = FastMCP("My Server", streamable_http_path="/")
@mcp_at_root.tool()
def process_data(data: str) -> str:
"""Process some data"""
return f"Processed: {data}"
# Mount at /process - endpoints will be at /process instead of /process/mcp
app = Starlette(
routes=[
Mount("/process", app=mcp_at_root.streamable_http_app()),
]
)
```
_Full example: [examples/snippets/servers/streamable_http_path_config.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/streamable_http_path_config.py)_
<!-- /snippet-source -->
#### SSE servers
> **Note**: SSE transport is being superseded by [Streamable HTTP transport](https://modelcontextprotocol.io/specification/2025-03-26/basic/transports#streamable-http).
You can mount the SSE server to an existing ASGI server using the `sse_app` method. This allows you to integrate the SSE server with other ASGI applications.
```python
from starlette.applications import Starlette
from starlette.routing import Mount, Host
from mcp.server.fastmcp import FastMCP
mcp = FastMCP("My App")
# Mount the SSE server to the existing ASGI server
app = Starlette(
routes=[
Mount('/', app=mcp.sse_app()),
]
)
# or dynamically mount as host
app.router.routes.append(Host('mcp.acme.corp', app=mcp.sse_app()))
```
When mounting multiple MCP servers under different paths, you can configure the mount path in several ways:
```python
from starlette.applications import Starlette
from starlette.routing import Mount
from mcp.server.fastmcp import FastMCP
# Create multiple MCP servers
github_mcp = FastMCP("GitHub API")
browser_mcp = FastMCP("Browser")
curl_mcp = FastMCP("Curl")
search_mcp = FastMCP("Search")
# Method 1: Configure mount paths via settings (recommended for persistent configuration)
github_mcp.settings.mount_path = "/github"
browser_mcp.settings.mount_path = "/browser"
# Method 2: Pass mount path directly to sse_app (preferred for ad-hoc mounting)
# This approach doesn't modify the server's settings permanently
# Create Starlette app with multiple mounted servers
app = Starlette(
routes=[
# Using settings-based configuration
Mount("/github", app=github_mcp.sse_app()),
Mount("/browser", app=browser_mcp.sse_app()),
# Using direct mount path parameter
Mount("/curl", app=curl_mcp.sse_app("/curl")),
Mount("/search", app=search_mcp.sse_app("/search")),
]
)
# Method 3: For direct execution, you can also pass the mount path to run()
if __name__ == "__main__":
search_mcp.run(transport="sse", mount_path="/search")
```
For more information on mounting applications in Starlette, see the [Starlette documentation](https://www.starlette.io/routing/#submounting-routes).
## Advanced Usage
### Low-Level Server
For more control, you can use the low-level server implementation directly. This gives you full access to the protocol and allows you to customize every aspect of your server, including lifecycle management through the lifespan API:
<!-- snippet-source examples/snippets/servers/lowlevel/lifespan.py -->
```python
"""
Run from the repository root:
uv run examples/snippets/servers/lowlevel/lifespan.py
"""
from collections.abc import AsyncIterator
from contextlib import asynccontextmanager
from typing import Any
import mcp.server.stdio
import mcp.types as types
from mcp.server.lowlevel import NotificationOptions, Server
from mcp.server.models import InitializationOptions
# Mock database class for example
class Database:
"""Mock database class for example."""
@classmethod
async def connect(cls) -> "Database":
"""Connect to database."""
print("Database connected")
return cls()
async def disconnect(self) -> None:
"""Disconnect from database."""
print("Database disconnected")
async def query(self, query_str: str) -> list[dict[str, str]]:
"""Execute a query."""
# Simulate database query
return [{"id": "1", "name": "Example", "query": query_str}]
@asynccontextmanager
async def server_lifespan(_server: Server) -> AsyncIterator[dict[str, Any]]:
"""Manage server startup and shutdown lifecycle."""
# Initialize resources on startup
db = await Database.connect()
try:
yield {"db": db}
finally:
# Clean up on shutdown
await db.disconnect()
# Pass lifespan to server
server = Server("example-server", lifespan=server_lifespan)
@server.list_tools()
async def handle_list_tools() -> list[types.Tool]:
"""List available tools."""
return [
types.Tool(
name="query_db",
description="Query the database",
inputSchema={
"type": "object",
"properties": {"query": {"type": "string", "description": "SQL query to execute"}},
"required": ["query"],
},
)
]
@server.call_tool()
async def query_db(name: str, arguments: dict[str, Any]) -> list[types.TextContent]:
"""Handle database query tool call."""
if name != "query_db":
raise ValueError(f"Unknown tool: {name}")
# Access lifespan context
ctx = server.request_context
db = ctx.lifespan_context["db"]
# Execute query
results = await db.query(arguments["query"])
return [types.TextContent(type="text", text=f"Query results: {results}")]
async def run():
"""Run the server with lifespan management."""
async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
await server.run(
read_stream,
write_stream,
InitializationOptions(
server_name="example-server",
server_version="0.1.0",
capabilities=server.get_capabilities(
notification_options=NotificationOptions(),
experimental_capabilities={},
),
),
)
if __name__ == "__main__":
import asyncio
asyncio.run(run())
```
_Full example: [examples/snippets/servers/lowlevel/lifespan.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/lowlevel/lifespan.py)_
<!-- /snippet-source -->
The lifespan API provides:
- A way to initialize resources when the server starts and clean them up when it stops
- Access to initialized resources through the request context in handlers
- Type-safe context passing between lifespan and request handlers
<!-- snippet-source examples/snippets/servers/lowlevel/basic.py -->
```python
"""
Run from the repository root:
uv run examples/snippets/servers/lowlevel/basic.py
"""
import asyncio
import mcp.server.stdio
import mcp.types as types
from mcp.server.lowlevel import NotificationOptions, Server
from mcp.server.models import InitializationOptions
# Create a server instance
server = Server("example-server")
@server.list_prompts()
async def handle_list_prompts() -> list[types.Prompt]:
"""List available prompts."""
return [
types.Prompt(
name="example-prompt",
description="An example prompt template",
arguments=[types.PromptArgument(name="arg1", description="Example argument", required=True)],
)
]
@server.get_prompt()
async def handle_get_prompt(name: str, arguments: dict[str, str] | None) -> types.GetPromptResult:
"""Get a specific prompt by name."""
if name != "example-prompt":
raise ValueError(f"Unknown prompt: {name}")
arg1_value = (arguments or {}).get("arg1", "default")
return types.GetPromptResult(
description="Example prompt",
messages=[
types.PromptMessage(
role="user",
content=types.TextContent(type="text", text=f"Example prompt text with argument: {arg1_value}"),
)
],
)
async def run():
"""Run the basic low-level server."""
async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
await server.run(
read_stream,
write_stream,
InitializationOptions(
server_name="example",
server_version="0.1.0",
capabilities=server.get_capabilities(
notification_options=NotificationOptions(),
experimental_capabilities={},
),
),
)
if __name__ == "__main__":
asyncio.run(run())
```
_Full example: [examples/snippets/servers/lowlevel/basic.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/lowlevel/basic.py)_
<!-- /snippet-source -->
Caution: The `uv run mcp run` and `uv run mcp dev` tools don't support low-level servers.
#### Structured Output Support
The low-level server supports structured output for tools, allowing you to return both human-readable content and machine-readable structured data. Tools can define an `outputSchema` to validate their structured output:
<!-- snippet-source examples/snippets/servers/lowlevel/structured_output.py -->
```python
"""
Run from the repository root:
uv run examples/snippets/servers/lowlevel/structured_output.py
"""
import asyncio
from typing import Any
import mcp.server.stdio
import mcp.types as types
from mcp.server.lowlevel import NotificationOptions, Server
from mcp.server.models import InitializationOptions
server = Server("example-server")
@server.list_tools()
async def list_tools() -> list[types.Tool]:
"""List available tools with structured output schemas."""
return [
types.Tool(
name="get_weather",
description="Get current weather for a city",
inputSchema={
"type": "object",
"properties": {"city": {"type": "string", "description": "City name"}},
"required": ["city"],
},
outputSchema={
"type": "object",
"properties": {
"temperature": {"type": "number", "description": "Temperature in Celsius"},
"condition": {"type": "string", "description": "Weather condition"},
"humidity": {"type": "number", "description": "Humidity percentage"},
"city": {"type": "string", "description": "City name"},
},
"required": ["temperature", "condition", "humidity", "city"],
},
)
]
@server.call_tool()
async def call_tool(name: str, arguments: dict[str, Any]) -> dict[str, Any]:
"""Handle tool calls with structured output."""
if name == "get_weather":
city = arguments["city"]
# Simulated weather data - in production, call a weather API
weather_data = {
"temperature": 22.5,
"condition": "partly cloudy",
"humidity": 65,
"city": city, # Include the requested city
}
# low-level server will validate structured output against the tool's
# output schema, and additionally serialize it into a TextContent block
# for backwards compatibility with pre-2025-06-18 clients.
return weather_data
else:
raise ValueError(f"Unknown tool: {name}")
async def run():
"""Run the structured output server."""
async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
await server.run(
read_stream,
write_stream,
InitializationOptions(
server_name="structured-output-example",
server_version="0.1.0",
capabilities=server.get_capabilities(
notification_options=NotificationOptions(),
experimental_capabilities={},
),
),
)
if __name__ == "__main__":
asyncio.run(run())
```
_Full example: [examples/snippets/servers/lowlevel/structured_output.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/lowlevel/structured_output.py)_
<!-- /snippet-source -->
Tools can return data in four ways:
1. **Content only**: Return a list of content blocks (default behavior before spec revision 2025-06-18)
2. **Structured data only**: Return a dictionary that will be serialized to JSON (Introduced in spec revision 2025-06-18)
3. **Both**: Return a tuple of (content, structured_data) — the preferred option for backwards compatibility
4. **Direct CallToolResult**: Return `CallToolResult` directly for full control (including `_meta` field)
When an `outputSchema` is defined, the server automatically validates the structured output against the schema. This ensures type safety and helps catch errors early.
##### Returning CallToolResult Directly
For full control over the response including the `_meta` field (for passing data to client applications without exposing it to the model), return `CallToolResult` directly:
<!-- snippet-source examples/snippets/servers/lowlevel/direct_call_tool_result.py -->
```python
"""
Run from the repository root:
uv run examples/snippets/servers/lowlevel/direct_call_tool_result.py
"""
import asyncio
from typing import Any
import mcp.server.stdio
import mcp.types as types
from mcp.server.lowlevel import NotificationOptions, Server
from mcp.server.models import InitializationOptions
server = Server("example-server")
@server.list_tools()
async def list_tools() -> list[types.Tool]:
"""List available tools."""
return [
types.Tool(
name="advanced_tool",
description="Tool with full control including _meta field",
inputSchema={
"type": "object",
"properties": {"message": {"type": "string"}},
"required": ["message"],
},
)
]
@server.call_tool()
async def handle_call_tool(name: str, arguments: dict[str, Any]) -> types.CallToolResult:
"""Handle tool calls by returning CallToolResult directly."""
if name == "advanced_tool":
message = str(arguments.get("message", ""))
return types.CallToolResult(
content=[types.TextContent(type="text", text=f"Processed: {message}")],
structuredContent={"result": "success", "message": message},
_meta={"hidden": "data for client applications only"},
)
raise ValueError(f"Unknown tool: {name}")
async def run():
"""Run the server."""
async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
await server.run(
read_stream,
write_stream,
InitializationOptions(
server_name="example",
server_version="0.1.0",
capabilities=server.get_capabilities(
notification_options=NotificationOptions(),
experimental_capabilities={},
),
),
)
if __name__ == "__main__":
asyncio.run(run())
```
_Full example: [examples/snippets/servers/lowlevel/direct_call_tool_result.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/lowlevel/direct_call_tool_result.py)_
<!-- /snippet-source -->
**Note:** When returning `CallToolResult`, you bypass the automatic content/structured conversion. You must construct the complete response yourself.
### Pagination (Advanced)
For servers that need to handle large datasets, the low-level server provides paginated versions of list operations. This is an optional optimization - most servers won't need pagination unless they're dealing with hundreds or thousands of items.
#### Server-side Implementation
<!-- snippet-source examples/snippets/servers/pagination_example.py -->
```python
"""
Example of implementing pagination with MCP server decorators.
"""
from pydantic import AnyUrl
import mcp.types as types
from mcp.server.lowlevel import Server
# Initialize the server
server = Server("paginated-server")
# Sample data to paginate
ITEMS = [f"Item {i}" for i in range(1, 101)] # 100 items
@server.list_resources()
async def list_resources_paginated(request: types.ListResourcesRequest) -> types.ListResourcesResult:
"""List resources with pagination support."""
page_size = 10
# Extract cursor from request params
cursor = request.params.cursor if request.params is not None else None
# Parse cursor to get offset
start = 0 if cursor is None else int(cursor)
end = start + page_size
# Get page of resources
page_items = [
types.Resource(uri=AnyUrl(f"resource://items/{item}"), name=item, description=f"Description for {item}")
for item in ITEMS[start:end]
]
# Determine next cursor
next_cursor = str(end) if end < len(ITEMS) else None
return types.ListResourcesResult(resources=page_items, nextCursor=next_cursor)
```
_Full example: [examples/snippets/servers/pagination_example.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/pagination_example.py)_
<!-- /snippet-source -->
#### Client-side Consumption
<!-- snippet-source examples/snippets/clients/pagination_client.py -->
```python
"""
Example of consuming paginated MCP endpoints from a client.
"""
import asyncio
from mcp.client.session import ClientSession
from mcp.client.stdio import StdioServerParameters, stdio_client
from mcp.types import PaginatedRequestParams, Resource
async def list_all_resources() -> None:
"""Fetch all resources using pagination."""
async with stdio_client(StdioServerParameters(command="uv", args=["run", "mcp-simple-pagination"])) as (
read,
write,
):
async with ClientSession(read, write) as session:
await session.initialize()
all_resources: list[Resource] = []
cursor = None
while True:
# Fetch a page of resources
result = await session.list_resources(params=PaginatedRequestParams(cursor=cursor))
all_resources.extend(result.resources)
print(f"Fetched {len(result.resources)} resources")
# Check if there are more pages
if result.nextCursor:
cursor = result.nextCursor
else:
break
print(f"Total resources: {len(all_resources)}")
if __name__ == "__main__":
asyncio.run(list_all_resources())
```
_Full example: [examples/snippets/clients/pagination_client.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/clients/pagination_client.py)_
<!-- /snippet-source -->
#### Key Points
- **Cursors are opaque strings** - the server defines the format (numeric offsets, timestamps, etc.)
- **Return `nextCursor=None`** when there are no more pages
- **Backward compatible** - clients that don't support pagination will still work (they'll just get the first page)
- **Flexible page sizes** - Each endpoint can define its own page size based on data characteristics
See the [simple-pagination example](examples/servers/simple-pagination) for a complete implementation.
### Writing MCP Clients
The SDK provides a high-level client interface for connecting to MCP servers using various [transports](https://modelcontextprotocol.io/specification/2025-03-26/basic/transports):
<!-- snippet-source examples/snippets/clients/stdio_client.py -->
```python
"""
cd to the `examples/snippets/clients` directory and run:
uv run client
"""
import asyncio
import os
from pydantic import AnyUrl
from mcp import ClientSession, StdioServerParameters, types
from mcp.client.stdio import stdio_client
from mcp.shared.context import RequestContext
# Create server parameters for stdio connection
server_params = StdioServerParameters(
command="uv", # Using uv to run the server
args=["run", "server", "fastmcp_quickstart", "stdio"], # We're already in snippets dir
env={"UV_INDEX": os.environ.get("UV_INDEX", "")},
)
# Optional: create a sampling callback
async def handle_sampling_message(
context: RequestContext[ClientSession, None], params: types.CreateMessageRequestParams
) -> types.CreateMessageResult:
print(f"Sampling request: {params.messages}")
return types.CreateMessageResult(
role="assistant",
content=types.TextContent(
type="text",
text="Hello, world! from model",
),
model="gpt-3.5-turbo",
stopReason="endTurn",
)
async def run():
async with stdio_client(server_params) as (read, write):
async with ClientSession(read, write, sampling_callback=handle_sampling_message) as session:
# Initialize the connection
await session.initialize()
# List available prompts
prompts = await session.list_prompts()
print(f"Available prompts: {[p.name for p in prompts.prompts]}")
# Get a prompt (greet_user prompt from fastmcp_quickstart)
if prompts.prompts:
prompt = await session.get_prompt("greet_user", arguments={"name": "Alice", "style": "friendly"})
print(f"Prompt result: {prompt.messages[0].content}")
# List available resources
resources = await session.list_resources()
print(f"Available resources: {[r.uri for r in resources.resources]}")
# List available tools
tools = await session.list_tools()
print(f"Available tools: {[t.name for t in tools.tools]}")
# Read a resource (greeting resource from fastmcp_quickstart)
resource_content = await session.read_resource(AnyUrl("greeting://World"))
content_block = resource_content.contents[0]
if isinstance(content_block, types.TextContent):
print(f"Resource content: {content_block.text}")
# Call a tool (add tool from fastmcp_quickstart)
result = await session.call_tool("add", arguments={"a": 5, "b": 3})
result_unstructured = result.content[0]
if isinstance(result_unstructured, types.TextContent):
print(f"Tool result: {result_unstructured.text}")
result_structured = result.structuredContent
print(f"Structured tool result: {result_structured}")
def main():
"""Entry point for the client script."""
asyncio.run(run())
if __name__ == "__main__":
main()
```
_Full example: [examples/snippets/clients/stdio_client.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/clients/stdio_client.py)_
<!-- /snippet-source -->
Clients can also connect using [Streamable HTTP transport](https://modelcontextprotocol.io/specification/2025-03-26/basic/transports#streamable-http):
<!-- snippet-source examples/snippets/clients/streamable_basic.py -->
```python
"""
Run from the repository root:
uv run examples/snippets/clients/streamable_basic.py
"""
import asyncio
from mcp import ClientSession
from mcp.client.streamable_http import streamablehttp_client
async def main():
# Connect to a streamable HTTP server
async with streamablehttp_client("http://localhost:8000/mcp") as (
read_stream,
write_stream,
_,
):
# Create a session using the client streams
async with ClientSession(read_stream, write_stream) as session:
# Initialize the connection
await session.initialize()
# List available tools
tools = await session.list_tools()
print(f"Available tools: {[tool.name for tool in tools.tools]}")
if __name__ == "__main__":
asyncio.run(main())
```
_Full example: [examples/snippets/clients/streamable_basic.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/clients/streamable_basic.py)_
<!-- /snippet-source -->
### Client Display Utilities
When building MCP clients, the SDK provides utilities to help display human-readable names for tools, resources, and prompts:
<!-- snippet-source examples/snippets/clients/display_utilities.py -->
```python
"""
cd to the `examples/snippets` directory and run:
uv run display-utilities-client
"""
import asyncio
import os
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from mcp.shared.metadata_utils import get_display_name
# Create server parameters for stdio connection
server_params = StdioServerParameters(
command="uv", # Using uv to run the server
args=["run", "server", "fastmcp_quickstart", "stdio"],
env={"UV_INDEX": os.environ.get("UV_INDEX", "")},
)
async def display_tools(session: ClientSession):
"""Display available tools with human-readable names"""
tools_response = await session.list_tools()
for tool in tools_response.tools:
# get_display_name() returns the title if available, otherwise the name
display_name = get_display_name(tool)
print(f"Tool: {display_name}")
if tool.description:
print(f" {tool.description}")
async def display_resources(session: ClientSession):
"""Display available resources with human-readable names"""
resources_response = await session.list_resources()
for resource in resources_response.resources:
display_name = get_display_name(resource)
print(f"Resource: {display_name} ({resource.uri})")
templates_response = await session.list_resource_templates()
for template in templates_response.resourceTemplates:
display_name = get_display_name(template)
print(f"Resource Template: {display_name}")
async def run():
"""Run the display utilities example."""
async with stdio_client(server_params) as (read, write):
async with ClientSession(read, write) as session:
# Initialize the connection
await session.initialize()
print("=== Available Tools ===")
await display_tools(session)
print("\n=== Available Resources ===")
await display_resources(session)
def main():
"""Entry point for the display utilities client."""
asyncio.run(run())
if __name__ == "__main__":
main()
```
_Full example: [examples/snippets/clients/display_utilities.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/clients/display_utilities.py)_
<!-- /snippet-source -->
The `get_display_name()` function implements the proper precedence rules for displaying names:
- For tools: `title` > `annotations.title` > `name`
- For other objects: `title` > `name`
This ensures your client UI shows the most user-friendly names that servers provide.
### OAuth Authentication for Clients
The SDK includes [authorization support](https://modelcontextprotocol.io/specification/2025-03-26/basic/authorization) for connecting to protected MCP servers:
<!-- snippet-source examples/snippets/clients/oauth_client.py -->
```python
"""
Before running, specify running MCP RS server URL.
To spin up RS server locally, see
examples/servers/simple-auth/README.md
cd to the `examples/snippets` directory and run:
uv run oauth-client
"""
import asyncio
from urllib.parse import parse_qs, urlparse
from pydantic import AnyUrl
from mcp import ClientSession
from mcp.client.auth import OAuthClientProvider, TokenStorage
from mcp.client.streamable_http import streamablehttp_client
from mcp.shared.auth import OAuthClientInformationFull, OAuthClientMetadata, OAuthToken
class InMemoryTokenStorage(TokenStorage):
"""Demo In-memory token storage implementation."""
def __init__(self):
self.tokens: OAuthToken | None = None
self.client_info: OAuthClientInformationFull | None = None
async def get_tokens(self) -> OAuthToken | None:
"""Get stored tokens."""
return self.tokens
async def set_tokens(self, tokens: OAuthToken) -> None:
"""Store tokens."""
self.tokens = tokens
async def get_client_info(self) -> OAuthClientInformationFull | None:
"""Get stored client information."""
return self.client_info
async def set_client_info(self, client_info: OAuthClientInformationFull) -> None:
"""Store client information."""
self.client_info = client_info
async def handle_redirect(auth_url: str) -> None:
print(f"Visit: {auth_url}")
async def handle_callback() -> tuple[str, str | None]:
callback_url = input("Paste callback URL: ")
params = parse_qs(urlparse(callback_url).query)
return params["code"][0], params.get("state", [None])[0]
async def main():
"""Run the OAuth client example."""
oauth_auth = OAuthClientProvider(
server_url="http://localhost:8001",
client_metadata=OAuthClientMetadata(
client_name="Example MCP Client",
redirect_uris=[AnyUrl("http://localhost:3000/callback")],
grant_types=["authorization_code", "refresh_token"],
response_types=["code"],
scope="user",
),
storage=InMemoryTokenStorage(),
redirect_handler=handle_redirect,
callback_handler=handle_callback,
)
async with streamablehttp_client("http://localhost:8001/mcp", auth=oauth_auth) as (read, write, _):
async with ClientSession(read, write) as session:
await session.initialize()
tools = await session.list_tools()
print(f"Available tools: {[tool.name for tool in tools.tools]}")
resources = await session.list_resources()
print(f"Available resources: {[r.uri for r in resources.resources]}")
def run():
asyncio.run(main())
if __name__ == "__main__":
run()
```
_Full example: [examples/snippets/clients/oauth_client.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/clients/oauth_client.py)_
<!-- /snippet-source -->
For a complete working example, see [`examples/clients/simple-auth-client/`](examples/clients/simple-auth-client/).
### Parsing Tool Results
When calling tools through MCP, the `CallToolResult` object contains the tool's response in a structured format. Understanding how to parse this result is essential for properly handling tool outputs.
```python
"""examples/snippets/clients/parsing_tool_results.py"""
import asyncio
from mcp import ClientSession, StdioServerParameters, types
from mcp.client.stdio import stdio_client
async def parse_tool_results():
"""Demonstrates how to parse different types of content in CallToolResult."""
server_params = StdioServerParameters(
command="python", args=["path/to/mcp_server.py"]
)
async with stdio_client(server_params) as (read, write):
async with ClientSession(read, write) as session:
await session.initialize()
# Example 1: Parsing text content
result = await session.call_tool("get_data", {"format": "text"})
for content in result.content:
if isinstance(content, types.TextContent):
print(f"Text: {content.text}")
# Example 2: Parsing structured content from JSON tools
result = await session.call_tool("get_user", {"id": "123"})
if hasattr(result, "structuredContent") and result.structuredContent:
# Access structured data directly
user_data = result.structuredContent
print(f"User: {user_data.get('name')}, Age: {user_data.get('age')}")
# Example 3: Parsing embedded resources
result = await session.call_tool("read_config", {})
for content in result.content:
if isinstance(content, types.EmbeddedResource):
resource = content.resource
if isinstance(resource, types.TextResourceContents):
print(f"Config from {resource.uri}: {resource.text}")
elif isinstance(resource, types.BlobResourceContents):
print(f"Binary data from {resource.uri}")
# Example 4: Parsing image content
result = await session.call_tool("generate_chart", {"data": [1, 2, 3]})
for content in result.content:
if isinstance(content, types.ImageContent):
print(f"Image ({content.mimeType}): {len(content.data)} bytes")
# Example 5: Handling errors
result = await session.call_tool("failing_tool", {})
if result.isError:
print("Tool execution failed!")
for content in result.content:
if isinstance(content, types.TextContent):
print(f"Error: {content.text}")
async def main():
await parse_tool_results()
if __name__ == "__main__":
asyncio.run(main())
```
### MCP Primitives
The MCP protocol defines three core primitives that servers can implement:
| Primitive | Control | Description | Example Use |
|-----------|-----------------------|-----------------------------------------------------|------------------------------|
| Prompts | User-controlled | Interactive templates invoked by user choice | Slash commands, menu options |
| Resources | Application-controlled| Contextual data managed by the client application | File contents, API responses |
| Tools | Model-controlled | Functions exposed to the LLM to take actions | API calls, data updates |
### Server Capabilities
MCP servers declare capabilities during initialization:
| Capability | Feature Flag | Description |
|--------------|------------------------------|------------------------------------|
| `prompts` | `listChanged` | Prompt template management |
| `resources` | `subscribe`<br/>`listChanged`| Resource exposure and updates |
| `tools` | `listChanged` | Tool discovery and execution |
| `logging` | - | Server logging configuration |
| `completions`| - | Argument completion suggestions |
## Documentation
- [API Reference](https://modelcontextprotocol.github.io/python-sdk/api/)
- [Model Context Protocol documentation](https://modelcontextprotocol.io)
- [Model Context Protocol specification](https://modelcontextprotocol.io/specification/latest)
- [Officially supported servers](https://github.com/modelcontextprotocol/servers)
## Contributing
We are passionate about supporting contributors of all levels of experience and would love to see you get involved in the project. See the [contributing guide](CONTRIBUTING.md) to get started.
## License
This project is licensed under the MIT License - see the LICENSE file for details.
## /RELEASE.md
# Release Process
## Bumping Dependencies
1. Change dependency version in `pyproject.toml`
2. Upgrade lock with `uv lock --resolution lowest-direct`
## Major or Minor Release
Create a GitHub release via UI with the tag being `vX.Y.Z` where `X.Y.Z` is the version,
and the release title being the same. Then ask someone to review the release.
The package version will be set automatically from the tag.
## /SECURITY.md
# Security Policy
Thank you for helping us keep the SDKs and systems they interact with secure.
## Reporting Security Issues
This SDK is maintained by [Anthropic](https://www.anthropic.com/) as part of the Model Context Protocol project.
The security of our systems and user data is Anthropic’s top priority. We appreciate the work of security researchers acting in good faith in identifying and reporting potential vulnerabilities.
Our security program is managed on HackerOne and we ask that any validated vulnerability in this functionality be reported through their [submission form](https://hackerone.com/anthropic-vdp/reports/new?type=team&report_type=vulnerability).
## Vulnerability Disclosure Program
Our Vulnerability Program Guidelines are defined on our [HackerOne program page](https://hackerone.com/anthropic-vdp).
## /docs/api.md
::: mcp
## /docs/authorization.md
# Authorization
!!! warning "Under Construction"
This page is currently being written. Check back soon for complete documentation.
## /docs/concepts.md
# Concepts
!!! warning "Under Construction"
This page is currently being written. Check back soon for complete documentation.
<!--
- Server vs Client
- Three primitives (tools, resources, prompts)
- Transports (stdio, SSE, streamable HTTP)
- Context and sessions
- Lifecycle and state
-->
## /docs/index.md
# MCP Python SDK
The **Model Context Protocol (MCP)** allows applications to provide context for LLMs in a standardized way, separating the concerns of providing context from the actual LLM interaction.
This Python SDK implements the full MCP specification, making it easy to:
- **Build MCP servers** that expose resources, prompts, and tools
- **Create MCP clients** that can connect to any MCP server
- **Use standard transports** like stdio, SSE, and Streamable HTTP
If you want to read more about the specification, please visit the [MCP documentation](https://modelcontextprotocol.io).
## Quick Example
Here's a simple MCP server that exposes a tool, resource, and prompt:
```python title="server.py"
from mcp.server.fastmcp import FastMCP

# A minimal FastMCP server exposing one tool, one resource, and one prompt.
mcp = FastMCP("Test Server")


@mcp.tool()
def add(a: int, b: int) -> int:
    """Add two numbers"""
    return a + b


@mcp.resource("greeting://{name}")
def get_greeting(name: str) -> str:
    """Get a personalized greeting"""
    return f"Hello, {name}!"


@mcp.prompt()
def greet_user(name: str, style: str = "friendly") -> str:
    """Generate a greeting prompt"""
    return f"Write a {style} greeting for someone named {name}."
```
Test it with the [MCP Inspector](https://github.com/modelcontextprotocol/inspector):
```bash
uv run mcp dev server.py
```
## Getting Started
<!-- TODO(Marcelo): automatically generate the follow references with a header on each of those files. -->
1. **[Install](installation.md)** the MCP SDK
2. **[Learn concepts](concepts.md)** - understand the three primitives and architecture
3. **[Explore authorization](authorization.md)** - add security to your servers
4. **[Use low-level APIs](low-level-server.md)** - for advanced customization
## API Reference
Full API documentation is available in the [API Reference](api.md).
## /docs/installation.md
# Installation
The Python SDK is available on PyPI as [`mcp`](https://pypi.org/project/mcp/) so installation is as simple as:
=== "pip"
```bash
pip install mcp
```
=== "uv"
```bash
uv add mcp
```
The following dependencies are automatically installed:
- [`httpx`](https://pypi.org/project/httpx/): HTTP client to handle HTTP Streamable and SSE transports.
- [`httpx-sse`](https://pypi.org/project/httpx-sse/): HTTP client to handle SSE transport.
- [`pydantic`](https://pypi.org/project/pydantic/): Types, JSON schema generation, data validation, and [more](https://docs.pydantic.dev/latest/).
- [`starlette`](https://pypi.org/project/starlette/): Web framework used to build the HTTP transport endpoints.
- [`python-multipart`](https://pypi.org/project/python-multipart/): Handle HTTP body parsing.
- [`sse-starlette`](https://pypi.org/project/sse-starlette/): Server-Sent Events for Starlette, used to build the SSE transport endpoint.
- [`pydantic-settings`](https://pypi.org/project/pydantic-settings/): Settings management used in FastMCP.
- [`uvicorn`](https://pypi.org/project/uvicorn/): ASGI server used to run the HTTP transport endpoints.
- [`jsonschema`](https://pypi.org/project/jsonschema/): JSON schema validation.
- [`pywin32`](https://pypi.org/project/pywin32/): Windows specific dependencies for the CLI tools.
This package has the following optional groups:
- `cli`: Installs `typer` and `python-dotenv` for the MCP CLI tools.
## /docs/low-level-server.md
# Low-Level Server
!!! warning "Under Construction"
This page is currently being written. Check back soon for complete documentation.
## /docs/testing.md
# Testing MCP Servers
As a developer, you will want to test your MCP server.
The Python SDK offers the `create_connected_server_and_client_session` function to create a session
using an in-memory transport. The name is admittedly long — we are working on improving it.
Anyway, let's assume you have a simple server with a single tool:
```python title="server.py"
from mcp.server import FastMCP

# Server under test for the in-memory session example.
app = FastMCP("Calculator")


@app.tool()
def add(a: int, b: int) -> int:
    """Add two numbers."""  # (1)!
    return a + b
```
1. The docstring is automatically added as the description of the tool.
To run the below test, you'll need to install the following dependencies:
=== "pip"
```bash
pip install inline-snapshot pytest
```
=== "uv"
```bash
uv add inline-snapshot pytest
```
!!! info
I think [`pytest`](https://docs.pytest.org/en/stable/) is a pretty standard testing framework,
so I won't go into details here.
The [`inline-snapshot`](https://15r10nk.github.io/inline-snapshot/latest/) is a library that allows
you to take snapshots of the output of your tests. Which makes it easier to create tests for your
server - you don't need to use it, but we are spreading the word for best practices.
```python title="test_server.py"
from collections.abc import AsyncGenerator

import pytest
from inline_snapshot import snapshot

from mcp.client.session import ClientSession
from mcp.shared.memory import create_connected_server_and_client_session
from mcp.types import CallToolResult, TextContent

from server import app


@pytest.fixture
def anyio_backend():  # (1)!
    return "asyncio"


@pytest.fixture
async def client_session() -> AsyncGenerator[ClientSession]:
    # In-memory transport: server and client talk over paired streams,
    # no subprocess or network needed.
    async with create_connected_server_and_client_session(app, raise_exceptions=True) as _session:
        yield _session


@pytest.mark.anyio
async def test_call_add_tool(client_session: ClientSession):
    result = await client_session.call_tool("add", {"a": 1, "b": 2})
    # snapshot() records the expected value on first run and asserts on later runs.
    assert result == snapshot(
        CallToolResult(
            content=[TextContent(type="text", text="3")],
            structuredContent={"result": 3},
        )
    )
```
1. If you are using `trio`, you should set `"trio"` as the `anyio_backend`. Check more information in the [anyio documentation](https://anyio.readthedocs.io/en/stable/testing.html#specifying-the-backends-to-run-on).
There you go! You can now extend your tests to cover more scenarios.
## /examples/README.md
# Python SDK Examples
This folder aims to provide simple examples of using the Python SDK. Please refer to the
[servers repository](https://github.com/modelcontextprotocol/servers)
for real-world servers.
## /examples/clients/simple-auth-client/README.md
# Simple Auth Client Example
A demonstration of how to use the MCP Python SDK with OAuth authentication over streamable HTTP or SSE transport.
## Features
- OAuth 2.0 authentication with PKCE
- Support for both StreamableHTTP and SSE transports
- Interactive command-line interface
## Installation
```bash
cd examples/clients/simple-auth-client
uv sync --reinstall
```
## Usage
### 1. Start an MCP server with OAuth support
```bash
# Example with mcp-simple-auth
cd path/to/mcp-simple-auth
uv run mcp-simple-auth --transport streamable-http --port 3001
```
### 2. Run the client
```bash
uv run mcp-simple-auth-client
# Or with custom server URL
MCP_SERVER_PORT=3001 uv run mcp-simple-auth-client
# Use SSE transport
MCP_TRANSPORT_TYPE=sse uv run mcp-simple-auth-client
```
### 3. Complete OAuth flow
The client will open your browser for authentication. After completing OAuth, you can use commands:
- `list` - List available tools
- `call <tool_name> [args]` - Call a tool with optional JSON arguments
- `quit` - Exit
## Example
```markdown
🔐 Simple MCP Auth Client
Connecting to: http://localhost:3001
Please visit the following URL to authorize the application:
http://localhost:3001/authorize?response_type=code&client_id=...
✅ Connected to MCP server at http://localhost:3001
mcp> list
📋 Available tools:
1. echo - Echo back the input text
mcp> call echo {"text": "Hello, world!"}
🔧 Tool 'echo' result:
Hello, world!
mcp> quit
👋 Goodbye!
```
## Configuration
- `MCP_SERVER_PORT` - Port of the MCP server (default: 8000)
- `MCP_TRANSPORT_TYPE` - Transport type: `streamable-http` (default) or `sse`
## /examples/clients/simple-auth-client/mcp_simple_auth_client/__init__.py
```py path="/examples/clients/simple-auth-client/mcp_simple_auth_client/__init__.py"
"""Simple OAuth client for MCP simple-auth server."""
```
## /examples/clients/simple-auth-client/mcp_simple_auth_client/main.py
```py path="/examples/clients/simple-auth-client/mcp_simple_auth_client/main.py"
#!/usr/bin/env python3
"""
Simple MCP client example with OAuth authentication support.
This client connects to an MCP server using streamable HTTP transport with OAuth.
"""
import asyncio
import os
import threading
import time
import webbrowser
from datetime import timedelta
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Any
from urllib.parse import parse_qs, urlparse
from mcp.client.auth import OAuthClientProvider, TokenStorage
from mcp.client.session import ClientSession
from mcp.client.sse import sse_client
from mcp.client.streamable_http import streamablehttp_client
from mcp.shared.auth import OAuthClientInformationFull, OAuthClientMetadata, OAuthToken
class InMemoryTokenStorage(TokenStorage):
    """Simple in-memory token storage implementation.

    Holds the OAuth tokens and dynamic client registration info for the
    lifetime of the process only; nothing is persisted.
    """

    def __init__(self):
        self._tokens: OAuthToken | None = None
        self._client_info: OAuthClientInformationFull | None = None

    async def get_tokens(self) -> OAuthToken | None:
        """Return the stored tokens, or None if none have been set."""
        return self._tokens

    async def set_tokens(self, tokens: OAuthToken) -> None:
        """Remember the given tokens."""
        self._tokens = tokens

    async def get_client_info(self) -> OAuthClientInformationFull | None:
        """Return the stored client registration info, or None."""
        return self._client_info

    async def set_client_info(self, client_info: OAuthClientInformationFull) -> None:
        """Remember the given client registration info."""
        self._client_info = client_info
class CallbackHandler(BaseHTTPRequestHandler):
    """Simple HTTP handler to capture OAuth callback."""

    def __init__(self, request, client_address, server, callback_data):
        """Initialize with callback data storage.

        callback_data is a dict shared with the owning CallbackServer; this
        handler writes the authorization code / state / error into it so the
        thread polling wait_for_callback() can pick them up.
        """
        self.callback_data = callback_data
        super().__init__(request, client_address, server)

    def do_GET(self):
        """Handle GET request from OAuth redirect."""
        parsed = urlparse(self.path)
        query_params = parse_qs(parsed.query)

        if "code" in query_params:
            # Success: record the authorization code and optional state for
            # the waiting client thread, then render a close-this-tab page.
            self.callback_data["authorization_code"] = query_params["code"][0]
            self.callback_data["state"] = query_params.get("state", [None])[0]
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self.wfile.write(b"""
<html>
<body>
<h1>Authorization Successful!</h1>
<p>You can close this window and return to the terminal.</p>
<script>setTimeout(() => window.close(), 2000);</script>
</body>
</html>
""")
        elif "error" in query_params:
            # Authorization server reported an error (e.g. access_denied):
            # surface it to the waiting thread via the shared dict.
            self.callback_data["error"] = query_params["error"][0]
            self.send_response(400)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self.wfile.write(
                f"""
<html>
<body>
<h1>Authorization Failed</h1>
<p>Error: {query_params["error"][0]}</p>
<p>You can close this window and return to the terminal.</p>
</body>
</html>
""".encode()
            )
        else:
            # Any other path (e.g. favicon requests) gets a 404.
            self.send_response(404)
            self.end_headers()

    def log_message(self, format, *args):
        """Suppress default logging."""
        pass
class CallbackServer:
    """Background HTTP server that waits for a single OAuth redirect."""

    def __init__(self, port=3000):
        self.port = port
        self.server = None
        self.thread = None
        # Shared mailbox that CallbackHandler fills in from the server thread.
        self.callback_data = {"authorization_code": None, "state": None, "error": None}

    def _create_handler_with_data(self):
        """Create a handler class with access to callback data."""
        callback_data = self.callback_data

        class DataCallbackHandler(CallbackHandler):
            def __init__(self, request, client_address, server):
                super().__init__(request, client_address, server, callback_data)

        return DataCallbackHandler

    def start(self):
        """Start the callback server in a background thread."""
        handler_class = self._create_handler_with_data()
        self.server = HTTPServer(("localhost", self.port), handler_class)
        # Daemon thread so an abandoned server never blocks interpreter exit.
        self.thread = threading.Thread(target=self.server.serve_forever, daemon=True)
        self.thread.start()
        print(f"🖥️ Started callback server on http://localhost:{self.port}")

    def stop(self):
        """Stop the callback server."""
        if self.server:
            self.server.shutdown()
            self.server.server_close()
        if self.thread:
            self.thread.join(timeout=1)

    def wait_for_callback(self, timeout=300):
        """Poll until an authorization code (or error) arrives, or time out."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            code = self.callback_data["authorization_code"]
            if code:
                return code
            if self.callback_data["error"]:
                raise Exception(f"OAuth error: {self.callback_data['error']}")
            time.sleep(0.1)
        raise Exception("Timeout waiting for OAuth callback")

    def get_state(self):
        """Get the received state parameter."""
        return self.callback_data["state"]
class SimpleAuthClient:
"""Simple MCP client with auth support."""
def __init__(self, server_url: str, transport_type: str = "streamable-http"):
    # Store connection settings; the session itself is created lazily in
    # connect() -> _run_session().
    self.server_url = server_url  # base URL of the MCP server
    self.transport_type = transport_type  # "streamable-http" (default) or "sse"
    self.session: ClientSession | None = None  # set once a session is open
async def connect(self):
    """Connect to the MCP server.

    Starts a local HTTP callback server for the OAuth redirect, builds an
    OAuthClientProvider around it, then opens either an SSE or StreamableHTTP
    transport (chosen by self.transport_type) and hands the resulting streams
    to _run_session().
    """
    print(f"🔗 Attempting to connect to {self.server_url}...")
    try:
        # Local HTTP server that will receive the OAuth redirect on port 3030.
        callback_server = CallbackServer(port=3030)
        callback_server.start()

        async def callback_handler() -> tuple[str, str | None]:
            """Wait for OAuth callback and return auth code and state."""
            print("⏳ Waiting for authorization callback...")
            try:
                auth_code = callback_server.wait_for_callback(timeout=300)
                return auth_code, callback_server.get_state()
            finally:
                # Tear the callback server down whether or not auth succeeded.
                callback_server.stop()

        # Metadata used for dynamic client registration with the auth server.
        # Note the redirect URI must match the callback server's port above.
        client_metadata_dict = {
            "client_name": "Simple Auth Client",
            "redirect_uris": ["http://localhost:3030/callback"],
            "grant_types": ["authorization_code", "refresh_token"],
            "response_types": ["code"],
            "token_endpoint_auth_method": "client_secret_post",
        }

        async def _default_redirect_handler(authorization_url: str) -> None:
            """Default redirect handler that opens the URL in a browser."""
            print(f"Opening browser for authorization: {authorization_url}")
            webbrowser.open(authorization_url)

        # Create OAuth authentication handler using the new interface
        oauth_auth = OAuthClientProvider(
            server_url=self.server_url,
            client_metadata=OAuthClientMetadata.model_validate(client_metadata_dict),
            storage=InMemoryTokenStorage(),
            redirect_handler=_default_redirect_handler,
            callback_handler=callback_handler,
        )

        # Create transport with auth handler based on transport type
        if self.transport_type == "sse":
            print("📡 Opening SSE transport connection with auth...")
            async with sse_client(
                url=self.server_url,
                auth=oauth_auth,
                timeout=60,
            ) as (read_stream, write_stream):
                # SSE transport provides no session-id getter, hence None.
                await self._run_session(read_stream, write_stream, None)
        else:
            print("📡 Opening StreamableHTTP transport connection with auth...")
            async with streamablehttp_client(
                url=self.server_url,
                auth=oauth_auth,
                timeout=timedelta(seconds=60),
            ) as (read_stream, write_stream, get_session_id):
                await self._run_session(read_stream, write_stream, get_session_id)
    except Exception as e:
        # Demo-level handling: report and print the traceback, don't re-raise.
        print(f"❌ Failed to connect: {e}")
        import traceback

        traceback.print_exc()
async def _run_session(self, read_stream, write_stream, get_session_id):
    """Run the MCP session with the given streams.

    get_session_id is a zero-arg callable supplied by the StreamableHTTP
    transport (None for SSE); when present, the session id is printed.
    Blocks in the interactive loop until the user quits.
    """
    print("🤝 Initializing MCP session...")
    async with ClientSession(read_stream, write_stream) as session:
        # Expose the live session to list_tools()/call_tool().
        self.session = session
        print("⚡ Starting session initialization...")
        await session.initialize()
        print("✨ Session initialization complete!")

        print(f"\n✅ Connected to MCP server at {self.server_url}")
        if get_session_id:
            session_id = get_session_id()
            if session_id:
                print(f"Session ID: {session_id}")

        # Run interactive loop
        await self.interactive_loop()
async def list_tools(self):
    """Fetch the server's tool list and print a numbered summary."""
    if not self.session:
        print("❌ Not connected to server")
        return
    try:
        result = await self.session.list_tools()
        tools = getattr(result, "tools", None)
        if not tools:
            print("No tools available")
            return
        print("\n📋 Available tools:")
        for index, tool in enumerate(tools, start=1):
            print(f"{index}. {tool.name}")
            if tool.description:
                print(f" Description: {tool.description}")
            print()
    except Exception as e:
        print(f"❌ Failed to list tools: {e}")
async def call_tool(self, tool_name: str, arguments: dict[str, Any] | None = None):
"""Call a specific tool."""
if not self.session:
print("❌ Not connected to server")
return
try:
result = await self.session.call_tool(tool_name, arguments or {})
print(f"\n🔧 Tool '{tool_name}' result:")
if hasattr(result, "content"):
for content in result.content:
if content.type == "text":
print(content.text)
else:
print(content)
else:
print(result)
except Exception as e:
print(f"❌ Failed to call tool '{tool_name}': {e}")
async def interactive_loop(self):
    """Read commands from stdin and dispatch them until the user quits."""
    print("\n🎯 Interactive MCP Client")
    print("Commands:")
    print(" list - List available tools")
    print(" call <tool_name> [args] - Call a tool")
    print(" quit - Exit the client")
    print()

    while True:
        try:
            line = input("mcp> ").strip()
            if not line:
                continue
            if line == "quit":
                break
            if line == "list":
                await self.list_tools()
                continue
            if line.startswith("call "):
                pieces = line.split(maxsplit=2)
                name = pieces[1] if len(pieces) > 1 else ""
                if not name:
                    print("❌ Please specify a tool name")
                    continue
                # Optional third token is a JSON object of arguments.
                tool_args = {}
                if len(pieces) > 2:
                    import json

                    try:
                        tool_args = json.loads(pieces[2])
                    except json.JSONDecodeError:
                        print("❌ Invalid arguments format (expected JSON)")
                        continue
                await self.call_tool(name, tool_args)
                continue
            print("❌ Unknown command. Try 'list', 'call <tool_name>', or 'quit'")
        except KeyboardInterrupt:
            print("\n\n👋 Goodbye!")
            break
        except EOFError:
            break
async def main():
    """Entry point: assemble the server URL from environment variables and connect.

    Environment variables:
        MCP_SERVER_PORT: port of the local MCP server (default "8000").
        MCP_TRANSPORT_TYPE: "streamable-http" (default, endpoint /mcp) or
            "sse" (endpoint /sse).
    """
    # Fix: this env var carries the server *port*, not a URL, so name it that
    # way and default to a string (it is only ever interpolated into the URL).
    server_port = os.getenv("MCP_SERVER_PORT", "8000")
    transport_type = os.getenv("MCP_TRANSPORT_TYPE", "streamable-http")
    # Most MCP streamable HTTP servers use /mcp as the endpoint.
    endpoint = "mcp" if transport_type == "streamable-http" else "sse"
    server_url = f"http://localhost:{server_port}/{endpoint}"

    print("🚀 Simple MCP Auth Client")
    print(f"Connecting to: {server_url}")
    print(f"Transport type: {transport_type}")

    # Start connection flow - OAuth will be handled automatically
    client = SimpleAuthClient(server_url, transport_type)
    await client.connect()
def cli():
    """CLI entry point for uv script."""
    asyncio.run(main())


# Also allow `python main.py` in addition to the installed console script.
if __name__ == "__main__":
    cli()
```
## /examples/clients/simple-auth-client/pyproject.toml
```toml path="/examples/clients/simple-auth-client/pyproject.toml"
[project]
name = "mcp-simple-auth-client"
version = "0.1.0"
description = "A simple OAuth client for the MCP simple-auth server"
readme = "README.md"
requires-python = ">=3.10"
authors = [{ name = "Anthropic" }]
keywords = ["mcp", "oauth", "client", "auth"]
license = { text = "MIT" }
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10",
]
dependencies = ["click>=8.2.0", "mcp"]
[project.scripts]
mcp-simple-auth-client = "mcp_simple_auth_client.main:cli"
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.hatch.build.targets.wheel]
packages = ["mcp_simple_auth_client"]
[tool.pyright]
include = ["mcp_simple_auth_client"]
venvPath = "."
venv = ".venv"
[tool.ruff.lint]
select = ["E", "F", "I"]
ignore = []
[tool.ruff]
line-length = 120
target-version = "py310"
[dependency-groups]
dev = ["pyright>=1.1.379", "pytest>=8.3.3", "ruff>=0.6.9"]
```
## /examples/clients/simple-chatbot/.python-version
```python-version path="/examples/clients/simple-chatbot/.python-version"
3.10
```
## /examples/clients/simple-chatbot/README.MD
# MCP Simple Chatbot
This example demonstrates how to integrate the Model Context Protocol (MCP) into a simple CLI chatbot. The implementation showcases MCP's flexibility by supporting multiple tools through MCP servers and is compatible with any LLM provider that follows OpenAI API standards.
## Requirements
- Python 3.10
- `python-dotenv`
- `httpx`
- `mcp`
- `uvicorn`
## Installation
1. **Install the dependencies:**
```bash
pip install -r requirements.txt
```
2. **Set up environment variables:**
Create a `.env` file in the root directory and add your API key:
```plaintext
LLM_API_KEY=your_api_key_here
```
**Note:** The current implementation is configured to use the Groq API endpoint (`https://api.groq.com/openai/v1/chat/completions`) with the `meta-llama/llama-4-scout-17b-16e-instruct` model. If you plan to use a different LLM provider, you'll need to modify the `LLMClient` class in `main.py` to use the appropriate endpoint URL and model parameters.
3. **Configure servers:**
The `servers_config.json` follows the same structure as Claude Desktop, allowing for easy integration of multiple servers.
Here's an example:
```json
{
"mcpServers": {
"sqlite": {
"command": "uvx",
"args": ["mcp-server-sqlite", "--db-path", "./test.db"]
},
"puppeteer": {
"command": "npx",
"args": ["-y", "@modelcontextprotocol/server-puppeteer"]
}
}
}
```
Environment variables are supported as well. Pass them as you would with the Claude Desktop App.
Example:
```json
{
"mcpServers": {
"server_name": {
"command": "uvx",
"args": ["mcp-server-name", "--additional-args"],
"env": {
"API_KEY": "your_api_key_here"
}
}
}
}
```
## Usage
1. **Run the client:**
```bash
python main.py
```
2. **Interact with the assistant:**
The assistant will automatically detect available tools and can respond to queries based on the tools provided by the configured servers.
3. **Exit the session:**
Type `quit` or `exit` to end the session.
## Architecture
- **Tool Discovery**: Tools are automatically discovered from configured servers.
- **System Prompt**: Tools are dynamically included in the system prompt, allowing the LLM to understand available capabilities.
- **Server Integration**: Supports any MCP-compatible server, tested with various server implementations including Uvicorn and Node.js.
### Class Structure
- **Configuration**: Manages environment variables and server configurations
- **Server**: Handles MCP server initialization, tool discovery, and execution
- **Tool**: Represents individual tools with their properties and formatting
- **LLMClient**: Manages communication with the LLM provider
- **ChatSession**: Orchestrates the interaction between user, LLM, and tools
### Logic Flow
1. **Tool Integration**:
- Tools are dynamically discovered from MCP servers
- Tool descriptions are automatically included in system prompt
- Tool execution is handled through standardized MCP protocol
2. **Runtime Flow**:
- User input is received
- Input is sent to LLM with context of available tools
- LLM response is parsed:
- If it's a tool call → execute tool and return result
- If it's a direct response → return to user
- Tool results are sent back to LLM for interpretation
- Final response is presented to user
## /examples/clients/simple-chatbot/mcp_simple_chatbot/.env.example
```example path="/examples/clients/simple-chatbot/mcp_simple_chatbot/.env.example"
LLM_API_KEY=gsk_1234567890
```
## /examples/clients/simple-chatbot/mcp_simple_chatbot/main.py
```py path="/examples/clients/simple-chatbot/mcp_simple_chatbot/main.py"
import asyncio
import json
import logging
import os
import shutil
from contextlib import AsyncExitStack
from typing import Any
import httpx
from dotenv import load_dotenv
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
# Configure logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
class Configuration:
    """Manages configuration and environment variables for the MCP client."""

    def __init__(self) -> None:
        """Load the .env file and capture the LLM API key."""
        self.load_env()
        self.api_key = os.getenv("LLM_API_KEY")

    @staticmethod
    def load_env() -> None:
        """Load environment variables from a local .env file, if present."""
        load_dotenv()

    @staticmethod
    def load_config(file_path: str) -> dict[str, Any]:
        """Parse a JSON server-configuration file.

        Args:
            file_path: Path to the JSON configuration file.

        Returns:
            The parsed configuration dictionary.

        Raises:
            FileNotFoundError: If the file does not exist.
            json.JSONDecodeError: If the file is not valid JSON.
        """
        with open(file_path, "r") as config_file:
            return json.load(config_file)

    @property
    def llm_api_key(self) -> str:
        """The LLM API key, guaranteed non-empty.

        Raises:
            ValueError: If LLM_API_KEY was not set in the environment.
        """
        if self.api_key:
            return self.api_key
        raise ValueError("LLM_API_KEY not found in environment variables")
class Server:
    """Manages MCP server connections and tool execution."""

    def __init__(self, name: str, config: dict[str, Any]) -> None:
        # Human-readable server name (the key from servers_config.json).
        self.name: str = name
        # Raw config entry: "command", "args", optional "env".
        self.config: dict[str, Any] = config
        self.stdio_context: Any | None = None
        self.session: ClientSession | None = None
        # Serializes cleanup so concurrent callers cannot double-close.
        self._cleanup_lock: asyncio.Lock = asyncio.Lock()
        # Owns the lifetimes of the stdio transport and the client session.
        self.exit_stack: AsyncExitStack = AsyncExitStack()

    async def initialize(self) -> None:
        """Initialize the server connection.

        Spawns the configured server subprocess over stdio and performs the
        MCP initialize handshake; on failure, partially-acquired resources
        are released before the exception is re-raised.
        """
        # `npx` is resolved through PATH so platform-specific shims are found.
        command = shutil.which("npx") if self.config["command"] == "npx" else self.config["command"]
        if command is None:
            raise ValueError("The command must be a valid string and cannot be None.")

        server_params = StdioServerParameters(
            command=command,
            args=self.config["args"],
            # Configured env is merged over the inherited environment;
            # None keeps the subprocess default behavior.
            env={**os.environ, **self.config["env"]} if self.config.get("env") else None,
        )
        try:
            stdio_transport = await self.exit_stack.enter_async_context(stdio_client(server_params))
            read, write = stdio_transport
            session = await self.exit_stack.enter_async_context(ClientSession(read, write))
            await session.initialize()
            self.session = session
        except Exception as e:
            logging.error(f"Error initializing server {self.name}: {e}")
            await self.cleanup()
            raise

    async def list_tools(self) -> list[Any]:
        """List available tools from the server.

        Returns:
            A list of available tools.

        Raises:
            RuntimeError: If the server is not initialized.
        """
        if not self.session:
            raise RuntimeError(f"Server {self.name} not initialized")

        tools_response = await self.session.list_tools()
        tools = []

        # The response iterates as (field_name, value) pairs; pick "tools".
        for item in tools_response:
            if isinstance(item, tuple) and item[0] == "tools":
                tools.extend(Tool(tool.name, tool.description, tool.inputSchema, tool.title) for tool in item[1])

        return tools

    async def execute_tool(
        self,
        tool_name: str,
        arguments: dict[str, Any],
        retries: int = 2,
        delay: float = 1.0,
    ) -> Any:
        """Execute a tool with retry mechanism.

        Args:
            tool_name: Name of the tool to execute.
            arguments: Tool arguments.
            retries: Number of retry attempts.
            delay: Delay between retries in seconds.

        Returns:
            Tool execution result.

        Raises:
            RuntimeError: If server is not initialized.
            Exception: If tool execution fails after all retries.
        """
        if not self.session:
            raise RuntimeError(f"Server {self.name} not initialized")

        attempt = 0
        while attempt < retries:
            try:
                logging.info(f"Executing {tool_name}...")
                result = await self.session.call_tool(tool_name, arguments)
                return result
            except Exception as e:
                attempt += 1
                logging.warning(f"Error executing tool: {e}. Attempt {attempt} of {retries}.")
                if attempt < retries:
                    logging.info(f"Retrying in {delay} seconds...")
                    await asyncio.sleep(delay)
                else:
                    logging.error("Max retries reached. Failing.")
                    raise

    async def cleanup(self) -> None:
        """Clean up server resources."""
        async with self._cleanup_lock:
            try:
                # Closing the exit stack tears down session and transport.
                await self.exit_stack.aclose()
                self.session = None
                self.stdio_context = None
            except Exception as e:
                logging.error(f"Error during cleanup of server {self.name}: {e}")
class Tool:
"""Represents a tool with its properties and formatting."""
def __init__(
self,
name: str,
description: str,
input_schema: dict[str, Any],
title: str | None = None,
) -> None:
self.name: str = name
self.title: str | None = title
self.description: str = description
self.input_schema: dict[str, Any] = input_schema
def format_for_llm(self) -> str:
"""Format tool information for LLM.
Returns:
A formatted string describing the tool.
"""
args_desc = []
if "properties" in self.input_schema:
for param_name, param_info in self.input_schema["properties"].items():
arg_desc = f"- {param_name}: {param_info.get('description', 'No description')}"
if param_name in self.input_schema.get("required", []):
arg_desc += " (required)"
args_desc.append(arg_desc)
# Build the formatted output with title as a separate field
output = f"Tool: {self.name}\n"
# Add human-readable title if available
if self.title:
output += f"User-readable title: {self.title}\n"
output += f"""Description: {self.description}
Arguments:
{chr(10).join(args_desc)}
"""
return output
class LLMClient:
    """Manages communication with the LLM provider."""

    def __init__(self, api_key: str) -> None:
        # Bearer token for the Groq OpenAI-compatible endpoint.
        self.api_key: str = api_key

    def get_response(self, messages: list[dict[str, str]]) -> str:
        """Get a response from the LLM.

        Args:
            messages: A list of message dictionaries.

        Returns:
            The LLM's response as a string, or a user-facing error string if
            the request fails.
        """
        url = "https://api.groq.com/openai/v1/chat/completions"
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}",
        }
        payload = {
            "messages": messages,
            "model": "meta-llama/llama-4-scout-17b-16e-instruct",
            "temperature": 0.7,
            "max_tokens": 4096,
            "top_p": 1,
            "stream": False,
            "stop": None,
        }

        try:
            with httpx.Client() as client:
                response = client.post(url, headers=headers, json=payload)
                response.raise_for_status()
                data = response.json()
                return data["choices"][0]["message"]["content"]
        except httpx.HTTPError as e:
            # Fix: httpx.HTTPStatusError (raised by raise_for_status) is NOT a
            # subclass of httpx.RequestError, so the previous
            # `except httpx.RequestError` let HTTP error statuses propagate and
            # made the isinstance branch below unreachable. HTTPError is the
            # common base of both exception types.
            error_message = f"Error getting LLM response: {str(e)}"
            logging.error(error_message)

            if isinstance(e, httpx.HTTPStatusError):
                status_code = e.response.status_code
                logging.error(f"Status code: {status_code}")
                logging.error(f"Response details: {e.response.text}")

            return f"I encountered an error: {error_message}. Please try again or rephrase your request."
class ChatSession:
    """Orchestrates the interaction between user, LLM, and tools."""

    def __init__(self, servers: list[Server], llm_client: LLMClient) -> None:
        self.servers: list[Server] = servers
        self.llm_client: LLMClient = llm_client

    async def cleanup_servers(self) -> None:
        """Clean up all servers properly."""
        # Tear down in reverse initialization order, tolerating failures.
        for server in reversed(self.servers):
            try:
                await server.cleanup()
            except Exception as e:
                logging.warning(f"Warning during final cleanup: {e}")

    async def process_llm_response(self, llm_response: str) -> str:
        """Process the LLM response and execute tools if needed.

        Args:
            llm_response: The response from the LLM.

        Returns:
            The result of tool execution or the original response.
        """

        def _clean_json_string(json_string: str) -> str:
            """Remove ```json ... ``` or ``` ... ``` wrappers if the LLM response is fenced."""
            import re

            # Fix: the pattern previously ended in garbled text
            # ("{{contextString}}quot;") instead of the `$` end-of-string
            # anchor, which broke fence stripping entirely.
            pattern = r"^```(?:\s*json)?\s*(.*?)\s*```$"
            return re.sub(pattern, r"\1", json_string, flags=re.DOTALL | re.IGNORECASE).strip()

        try:
            # `json` is imported at module level; the redundant local
            # re-import has been removed.
            tool_call = json.loads(_clean_json_string(llm_response))
            if "tool" in tool_call and "arguments" in tool_call:
                logging.info(f"Executing tool: {tool_call['tool']}")
                logging.info(f"With arguments: {tool_call['arguments']}")

                # Dispatch to the first server that advertises the tool.
                for server in self.servers:
                    tools = await server.list_tools()
                    if any(tool.name == tool_call["tool"] for tool in tools):
                        try:
                            result = await server.execute_tool(tool_call["tool"], tool_call["arguments"])

                            # Surface progress metadata when the tool reports it.
                            if isinstance(result, dict) and "progress" in result:
                                progress = result["progress"]
                                total = result["total"]
                                percentage = (progress / total) * 100
                                logging.info(f"Progress: {progress}/{total} ({percentage:.1f}%)")

                            return f"Tool execution result: {result}"
                        except Exception as e:
                            error_msg = f"Error executing tool: {str(e)}"
                            logging.error(error_msg)
                            return error_msg

                return f"No server found with tool: {tool_call['tool']}"
            return llm_response
        except json.JSONDecodeError:
            # Not a tool call; pass the text through unchanged.
            return llm_response

    async def start(self) -> None:
        """Main chat session handler."""
        try:
            for server in self.servers:
                try:
                    await server.initialize()
                except Exception as e:
                    logging.error(f"Failed to initialize server: {e}")
                    await self.cleanup_servers()
                    return

            # Collect every tool from every server for the system prompt.
            all_tools = []
            for server in self.servers:
                tools = await server.list_tools()
                all_tools.extend(tools)

            tools_description = "\n".join([tool.format_for_llm() for tool in all_tools])

            system_message = (
                "You are a helpful assistant with access to these tools:\n\n"
                f"{tools_description}\n"
                "Choose the appropriate tool based on the user's question. "
                "If no tool is needed, reply directly.\n\n"
                "IMPORTANT: When you need to use a tool, you must ONLY respond with "
                "the exact JSON object format below, nothing else:\n"
                "{\n"
                ' "tool": "tool-name",\n'
                ' "arguments": {\n'
                ' "argument-name": "value"\n'
                " }\n"
                "}\n\n"
                "After receiving a tool's response:\n"
                "1. Transform the raw data into a natural, conversational response\n"
                "2. Keep responses concise but informative\n"
                "3. Focus on the most relevant information\n"
                "4. Use appropriate context from the user's question\n"
                "5. Avoid simply repeating the raw data\n\n"
                "Please use only the tools that are explicitly defined above."
            )

            messages = [{"role": "system", "content": system_message}]

            while True:
                try:
                    # NOTE(review): input is lower-cased before being sent to
                    # the LLM, which also lower-cases case-sensitive tool
                    # arguments — kept as-is to preserve existing behavior.
                    user_input = input("You: ").strip().lower()
                    if user_input in ["quit", "exit"]:
                        logging.info("\nExiting...")
                        break

                    messages.append({"role": "user", "content": user_input})
                    llm_response = self.llm_client.get_response(messages)
                    logging.info("\nAssistant: %s", llm_response)

                    result = await self.process_llm_response(llm_response)

                    if result != llm_response:
                        # A tool ran: feed its output back for a final answer.
                        messages.append({"role": "assistant", "content": llm_response})
                        messages.append({"role": "system", "content": result})

                        final_response = self.llm_client.get_response(messages)
                        logging.info("\nFinal response: %s", final_response)
                        messages.append({"role": "assistant", "content": final_response})
                    else:
                        messages.append({"role": "assistant", "content": llm_response})
                except KeyboardInterrupt:
                    logging.info("\nExiting...")
                    break
        finally:
            await self.cleanup_servers()
async def run() -> None:
    """Initialize and run the chat session."""
    # Load the API key from .env and server definitions from the JSON config.
    config = Configuration()
    server_config = config.load_config("servers_config.json")
    # One Server wrapper per entry under "mcpServers".
    servers = [Server(name, srv_config) for name, srv_config in server_config["mcpServers"].items()]
    llm_client = LLMClient(config.llm_api_key)
    chat_session = ChatSession(servers, llm_client)
    await chat_session.start()
def main() -> None:
    """Synchronous entry point that drives the async chat session."""
    asyncio.run(run())


if __name__ == "__main__":
    main()
```
## /examples/clients/simple-chatbot/mcp_simple_chatbot/requirements.txt
python-dotenv>=1.0.0
httpx>=0.27.0
mcp>=1.0.0
uvicorn>=0.32.1
## /examples/clients/simple-chatbot/mcp_simple_chatbot/servers_config.json
```json path="/examples/clients/simple-chatbot/mcp_simple_chatbot/servers_config.json"
{
"mcpServers": {
"sqlite": {
"command": "uvx",
"args": ["mcp-server-sqlite", "--db-path", "./test.db"]
},
"puppeteer": {
"command": "npx",
"args": ["-y", "@modelcontextprotocol/server-puppeteer"]
}
}
}
```
## /examples/clients/simple-chatbot/mcp_simple_chatbot/test.db
Binary file available at https://raw.githubusercontent.com/modelcontextprotocol/python-sdk/refs/heads/main/examples/clients/simple-chatbot/mcp_simple_chatbot/test.db
## /examples/clients/simple-chatbot/pyproject.toml
```toml path="/examples/clients/simple-chatbot/pyproject.toml"
[project]
name = "mcp-simple-chatbot"
version = "0.1.0"
description = "A simple CLI chatbot using the Model Context Protocol (MCP)"
readme = "README.md"
requires-python = ">=3.10"
authors = [{ name = "Edoardo Cilia" }]
keywords = ["mcp", "llm", "chatbot", "cli"]
license = { text = "MIT" }
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10",
]
dependencies = [
"python-dotenv>=1.0.0",
"requests>=2.31.0",
"mcp",
"uvicorn>=0.32.1",
]
[project.scripts]
mcp-simple-chatbot = "mcp_simple_chatbot.main:main"
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.hatch.build.targets.wheel]
packages = ["mcp_simple_chatbot"]
[tool.pyright]
include = ["mcp_simple_chatbot"]
venvPath = "."
venv = ".venv"
[tool.ruff.lint]
select = ["E", "F", "I"]
ignore = []
[tool.ruff]
line-length = 120
target-version = "py310"
[dependency-groups]
dev = ["pyright>=1.1.379", "pytest>=8.3.3", "ruff>=0.6.9"]
```
## /examples/fastmcp/complex_inputs.py
```py path="/examples/fastmcp/complex_inputs.py"
"""
FastMCP Complex inputs Example
Demonstrates validation via pydantic with complex models.
"""
from typing import Annotated
from pydantic import BaseModel, Field
from mcp.server.fastmcp import FastMCP
mcp = FastMCP("Shrimp Tank")
class ShrimpTank(BaseModel):
    """A tank holding a collection of named shrimp."""

    class Shrimp(BaseModel):
        # Name length is capped at 10 characters by pydantic validation.
        name: Annotated[str, Field(max_length=10)]

    shrimp: list[Shrimp]
@mcp.tool()
def name_shrimp(
    tank: ShrimpTank,
    # You can use pydantic Field in function signatures for validation.
    extra_names: Annotated[list[str], Field(max_length=10)],
) -> list[str]:
    """List all shrimp names in the tank"""
    # Tank names first, then the caller-supplied extras, order preserved.
    return [shrimp.name for shrimp in tank.shrimp] + extra_names
```
## /examples/fastmcp/desktop.py
```py path="/examples/fastmcp/desktop.py"
"""
FastMCP Desktop Example
A simple example that exposes the desktop directory as a resource.
"""
from pathlib import Path
from mcp.server.fastmcp import FastMCP
# Create server
mcp = FastMCP("Demo")
@mcp.resource("dir://desktop")
def desktop() -> list[str]:
    """List the files in the user's desktop"""
    # Resolves to ~/Desktop; entries are returned as path strings.
    desktop = Path.home() / "Desktop"
    return [str(f) for f in desktop.iterdir()]
@mcp.tool()
def sum(a: int, b: int) -> int:
    """Add two numbers"""
    # NOTE(review): shadows the builtin `sum` in this module; kept because the
    # function name doubles as the registered MCP tool name.
    return a + b
```
## /examples/fastmcp/direct_call_tool_result_return.py
```py path="/examples/fastmcp/direct_call_tool_result_return.py"
"""
FastMCP Echo Server with direct CallToolResult return
"""
from typing import Annotated
from pydantic import BaseModel
from mcp.server.fastmcp import FastMCP
from mcp.types import CallToolResult, TextContent
mcp = FastMCP("Echo Server")
class EchoResponse(BaseModel):
    """Schema describing the echo tool's structured content."""

    text: str
@mcp.tool()
def echo(text: str) -> Annotated[CallToolResult, EchoResponse]:
    """Echo the input text with structure and metadata"""
    # Returning CallToolResult directly lets one response carry plain content,
    # structured content, and _meta at the same time.
    return CallToolResult(
        content=[TextContent(type="text", text=text)], structuredContent={"text": text}, _meta={"some": "metadata"}
    )
```
## /examples/fastmcp/echo.py
```py path="/examples/fastmcp/echo.py"
"""
FastMCP Echo Server
"""
from mcp.server.fastmcp import FastMCP
# Create server
mcp = FastMCP("Echo Server")
@mcp.tool()
def echo_tool(text: str) -> str:
    """Echo the input text"""
    # Minimal tool: returns its argument unchanged.
    return text
@mcp.resource("echo://static")
def echo_resource() -> str:
    """Return a fixed string for the static echo resource URI."""
    return "Echo!"
@mcp.resource("echo://{text}")
def echo_template(text: str) -> str:
    """Echo the input text"""
    # {text} is bound from the resource URI by FastMCP template matching.
    return f"Echo: {text}"
@mcp.prompt("echo")
def echo_prompt(text: str) -> str:
    """Return the input unchanged as the prompt body."""
    return text
```
## /examples/fastmcp/icons_demo.py
```py path="/examples/fastmcp/icons_demo.py"
"""
FastMCP Icons Demo Server
Demonstrates using icons with tools, resources, prompts, and implementation.
"""
import base64
from pathlib import Path
from mcp.server.fastmcp import FastMCP, Icon
# Load the icon file and convert to data URI
icon_path = Path(__file__).parent / "mcp.png"
icon_data = base64.standard_b64encode(icon_path.read_bytes()).decode()
icon_data_uri = f"data:image/png;base64,{icon_data}"
icon_data = Icon(src=icon_data_uri, mimeType="image/png", sizes=["64x64"])
# Create server with icons in implementation
mcp = FastMCP("Icons Demo Server", website_url="https://github.com/modelcontextprotocol/python-sdk", icons=[icon_data])
@mcp.tool(icons=[icon_data])
def demo_tool(message: str) -> str:
    """A demo tool with an icon."""
    # The icon is attached via decorator metadata; the tool just echoes.
    return message
@mcp.resource("demo://readme", icons=[icon_data])
def readme_resource() -> str:
    """A demo resource with an icon"""
    return "This resource has an icon"
@mcp.prompt("prompt_with_icon", icons=[icon_data])
def prompt_with_icon(text: str) -> str:
    """A demo prompt with an icon"""
    return text
@mcp.tool(
    icons=[
        # Same image advertised at several sizes; clients pick the best fit.
        Icon(src=icon_data_uri, mimeType="image/png", sizes=["16x16"]),
        Icon(src=icon_data_uri, mimeType="image/png", sizes=["32x32"]),
        Icon(src=icon_data_uri, mimeType="image/png", sizes=["64x64"]),
    ]
)
def multi_icon_tool(action: str) -> str:
    """A tool demonstrating multiple icons."""
    return "multi_icon_tool"
if __name__ == "__main__":
# Run the server
mcp.run()
```
## /examples/fastmcp/logging_and_progress.py
```py path="/examples/fastmcp/logging_and_progress.py"
"""
FastMCP Echo Server that sends log messages and progress updates to the client
"""
import asyncio
from mcp.server.fastmcp import Context, FastMCP
# Create server
mcp = FastMCP("Echo Server with logging and progress updates")
@mcp.tool()
async def echo(text: str, ctx: Context) -> str:
    """Echo the input text sending log messages and progress updates during processing."""
    await ctx.report_progress(progress=0, total=100)
    await ctx.info("Starting to process echo for input: " + text)
    await asyncio.sleep(2)
    await ctx.info("Halfway through processing echo for input: " + text)
    await ctx.report_progress(progress=50, total=100)
    await asyncio.sleep(2)
    await ctx.info("Finished processing echo for input: " + text)
    await ctx.report_progress(progress=100, total=100)
    # Progress notifications are processed asynchronously by the client.
    # A small delay here helps ensure the last notification is processed by the client.
    await asyncio.sleep(0.1)
    return text
```
## /examples/fastmcp/mcp.png
Binary file available at https://raw.githubusercontent.com/modelcontextprotocol/python-sdk/refs/heads/main/examples/fastmcp/mcp.png
## /examples/fastmcp/memory.py
```py path="/examples/fastmcp/memory.py"
# /// script
# dependencies = ["pydantic-ai-slim[openai]", "asyncpg", "numpy", "pgvector"]
# ///
# uv pip install 'pydantic-ai-slim[openai]' asyncpg numpy pgvector
"""
Recursive memory system inspired by the human brain's clustering of memories.
Uses OpenAI's 'text-embedding-3-small' model and pgvector for efficient
similarity search.
"""
import asyncio
import math
import os
from dataclasses import dataclass
from datetime import datetime, timezone
from pathlib import Path
from typing import Annotated, Self, TypeVar
import asyncpg
import numpy as np
from openai import AsyncOpenAI
from pgvector.asyncpg import register_vector # Import register_vector
from pydantic import BaseModel, Field
from pydantic_ai import Agent
from mcp.server.fastmcp import FastMCP
MAX_DEPTH = 5
SIMILARITY_THRESHOLD = 0.7
DECAY_FACTOR = 0.99
REINFORCEMENT_FACTOR = 1.1
DEFAULT_LLM_MODEL = "openai:gpt-4o"
DEFAULT_EMBEDDING_MODEL = "text-embedding-3-small"
T = TypeVar("T")
mcp = FastMCP(
"memory",
dependencies=[
"pydantic-ai-slim[openai]",
"asyncpg",
"numpy",
"pgvector",
],
)
DB_DSN = "postgresql://postgres:postgres@localhost:54320/memory_db"
# reset memory with rm ~/.fastmcp/{USER}/memory/*
PROFILE_DIR = (Path.home() / ".fastmcp" / os.environ.get("USER", "anon") / "memory").resolve()
PROFILE_DIR.mkdir(parents=True, exist_ok=True)
def cosine_similarity(a: list[float], b: list[float]) -> float:
    """Return the cosine of the angle between vectors `a` and `b`."""
    vec_a = np.asarray(a, dtype=np.float64)
    vec_b = np.asarray(b, dtype=np.float64)
    norm_product = np.linalg.norm(vec_a) * np.linalg.norm(vec_b)
    return np.dot(vec_a, vec_b) / norm_product
async def do_ai(
    user_prompt: str,
    system_prompt: str,
    result_type: type[T] | Annotated,
    deps=None,
) -> T:
    """Run a one-shot pydantic-ai agent call and return its typed result.

    Args:
        user_prompt: The prompt text for this run.
        system_prompt: The system instruction for the agent.
        result_type: Type the agent should parse its answer into.
        deps: Optional dependencies forwarded to the agent run.
    """
    agent = Agent(
        DEFAULT_LLM_MODEL,
        system_prompt=system_prompt,
        result_type=result_type,
    )
    result = await agent.run(user_prompt, deps=deps)
    return result.data
@dataclass
class Deps:
    """External dependencies shared by the memory helpers."""

    # OpenAI client used for embeddings.
    openai: AsyncOpenAI
    # Postgres connection pool (with pgvector registered).
    pool: asyncpg.Pool
async def get_db_pool() -> asyncpg.Pool:
    """Create a pool whose connections have the pgvector codecs registered."""

    async def init(conn):
        # Runs once per new connection: ensure the extension, register codecs.
        await conn.execute("CREATE EXTENSION IF NOT EXISTS vector;")
        await register_vector(conn)

    pool = await asyncpg.create_pool(DB_DSN, init=init)
    return pool
class MemoryNode(BaseModel):
    """A single memory with its embedding and reinforcement statistics."""

    # Database row id; None until the first save() inserts the row.
    id: int | None = None
    content: str
    summary: str = ""
    importance: float = 1.0
    access_count: int = 0
    # Creation time as a UTC POSIX timestamp.
    timestamp: float = Field(default_factory=lambda: datetime.now(timezone.utc).timestamp())
    embedding: list[float]

    @classmethod
    async def from_content(cls, content: str, deps: Deps):
        """Build a node by embedding `content` via the OpenAI embeddings API."""
        embedding = await get_embedding(content, deps)
        return cls(content=content, embedding=embedding)

    async def save(self, deps: Deps):
        """Insert this node, or update it in place when it already has an id."""
        async with deps.pool.acquire() as conn:
            if self.id is None:
                result = await conn.fetchrow(
                    """
INSERT INTO memories (content, summary, importance, access_count,
timestamp, embedding)
VALUES ($1, $2, $3, $4, $5, $6)
RETURNING id
""",
                    self.content,
                    self.summary,
                    self.importance,
                    self.access_count,
                    self.timestamp,
                    self.embedding,
                )
                self.id = result["id"]
            else:
                await conn.execute(
                    """
UPDATE memories
SET content = $1, summary = $2, importance = $3,
access_count = $4, timestamp = $5, embedding = $6
WHERE id = $7
""",
                    self.content,
                    self.summary,
                    self.importance,
                    self.access_count,
                    self.timestamp,
                    self.embedding,
                    self.id,
                )

    async def merge_with(self, other: Self, deps: Deps):
        """Fuse another similar memory into this node and delete the other row."""
        # LLM-combine the two contents into one coherent text.
        self.content = await do_ai(
            f"{self.content}\n\n{other.content}",
            "Combine the following two texts into a single, coherent text.",
            str,
            deps,
        )
        self.importance += other.importance
        self.access_count += other.access_count
        # Element-wise average of the two embeddings.
        self.embedding = [(a + b) / 2 for a, b in zip(self.embedding, other.embedding)]
        self.summary = await do_ai(self.content, "Summarize the following text concisely.", str, deps)
        await self.save(deps)
        # Delete the merged node from the database
        if other.id is not None:
            await delete_memory(other.id, deps)

    def get_effective_importance(self):
        # Importance boosted logarithmically by access frequency.
        return self.importance * (1 + math.log(self.access_count + 1))
async def get_embedding(text: str, deps: Deps) -> list[float]:
    """Embed `text` with the configured OpenAI embedding model."""
    embedding_response = await deps.openai.embeddings.create(
        input=text,
        model=DEFAULT_EMBEDDING_MODEL,
    )
    return embedding_response.data[0].embedding
async def delete_memory(memory_id: int, deps: Deps):
    """Remove the memory row with the given id."""
    async with deps.pool.acquire() as conn:
        await conn.execute("DELETE FROM memories WHERE id = $1", memory_id)
async def add_memory(content: str, deps: Deps):
    """Store new content, merging similar memories and pruning the store."""
    new_memory = await MemoryNode.from_content(content, deps)
    await new_memory.save(deps)

    # Absorb nearby memories into the freshly added node.
    similar_memories = await find_similar_memories(new_memory.embedding, deps)
    for memory in similar_memories:
        if memory.id != new_memory.id:
            await new_memory.merge_with(memory, deps)

    # Reinforce/decay the rest of the store, then cap its size.
    await update_importance(new_memory.embedding, deps)
    await prune_memories(deps)

    return f"Remembered: {content}"
async def find_similar_memories(embedding: list[float], deps: Deps) -> list[MemoryNode]:
    """Return up to 5 memories nearest to `embedding` (pgvector `<->` distance)."""
    async with deps.pool.acquire() as conn:
        rows = await conn.fetch(
            """
SELECT id, content, summary, importance, access_count, timestamp, embedding
FROM memories
ORDER BY embedding <-> $1
LIMIT 5
""",
            embedding,
        )
        memories = [
            MemoryNode(
                id=row["id"],
                content=row["content"],
                summary=row["summary"],
                importance=row["importance"],
                access_count=row["access_count"],
                timestamp=row["timestamp"],
                embedding=row["embedding"],
            )
            for row in rows
        ]
    return memories
async def update_importance(user_embedding: list[float], deps: Deps):
    """Reinforce memories similar to `user_embedding`; decay all others."""
    async with deps.pool.acquire() as conn:
        rows = await conn.fetch("SELECT id, importance, access_count, embedding FROM memories")
        for row in rows:
            memory_embedding = row["embedding"]
            similarity = cosine_similarity(user_embedding, memory_embedding)
            if similarity > SIMILARITY_THRESHOLD:
                # Related memory: boost importance and count the access.
                new_importance = row["importance"] * REINFORCEMENT_FACTOR
                new_access_count = row["access_count"] + 1
            else:
                # Unrelated memory: let importance decay.
                new_importance = row["importance"] * DECAY_FACTOR
                new_access_count = row["access_count"]
            await conn.execute(
                """
UPDATE memories
SET importance = $1, access_count = $2
WHERE id = $3
""",
                new_importance,
                new_access_count,
                row["id"],
            )
async def prune_memories(deps: Deps):
    """Delete all but the MAX_DEPTH most important memories."""
    async with deps.pool.acquire() as conn:
        # OFFSET skips the keepers; everything after them is removed.
        rows = await conn.fetch(
            """
SELECT id, importance, access_count
FROM memories
ORDER BY importance DESC
OFFSET $1
""",
            MAX_DEPTH,
        )
        for row in rows:
            await conn.execute("DELETE FROM memories WHERE id = $1", row["id"])
async def display_memory_tree(deps: Deps) -> str:
    """Render the MAX_DEPTH most important memories as a bulleted list."""
    async with deps.pool.acquire() as conn:
        rows = await conn.fetch(
            """
            SELECT content, summary, importance, access_count
            FROM memories
            ORDER BY importance DESC
            LIMIT $1
            """,
            MAX_DEPTH,
        )

    lines: list[str] = []
    for row in rows:
        # Access count boosts the displayed importance logarithmically.
        effective_importance = row["importance"] * (1 + math.log(row["access_count"] + 1))
        label = row["summary"] or row["content"]
        lines.append(f"- {label} (Importance: {effective_importance:.2f})\n")
    return "".join(lines)
@mcp.tool()
async def remember(
    contents: list[str] = Field(description="List of observations or memories to store"),
):
    # Store every observation concurrently, then report what was remembered.
    deps = Deps(openai=AsyncOpenAI(), pool=await get_db_pool())
    try:
        stored = await asyncio.gather(*(add_memory(item, deps) for item in contents))
        return "\n".join(stored)
    finally:
        await deps.pool.close()
@mcp.tool()
async def read_profile() -> str:
    # Return the rendered memory tree.
    #
    # Fix: close the connection pool in a `finally` block so it is released
    # even when display_memory_tree raises (previously the pool leaked on
    # error). This also matches the error handling in `remember`.
    deps = Deps(openai=AsyncOpenAI(), pool=await get_db_pool())
    try:
        return await display_memory_tree(deps)
    finally:
        await deps.pool.close()
async def initialize_database():
    # (Re)create the demo database from scratch.
    #
    # Phase 1: connect to the default `postgres` database (note the
    # hard-coded DSN on port 54320), force-disconnect any sessions still
    # attached to `memory_db`, then drop and recreate it.
    pool = await asyncpg.create_pool("postgresql://postgres:postgres@localhost:54320/postgres")
    try:
        async with pool.acquire() as conn:
            # DROP DATABASE fails while other backends are connected,
            # so terminate them first.
            await conn.execute("""
                SELECT pg_terminate_backend(pg_stat_activity.pid)
                FROM pg_stat_activity
                WHERE pg_stat_activity.datname = 'memory_db'
                AND pid <> pg_backend_pid();
            """)
            await conn.execute("DROP DATABASE IF EXISTS memory_db;")
            await conn.execute("CREATE DATABASE memory_db;")
    finally:
        await pool.close()

    # Phase 2: connect to the fresh memory_db (DB_DSN) and install the
    # pgvector extension, the memories table, and an HNSW similarity index.
    pool = await asyncpg.create_pool(DB_DSN)
    try:
        async with pool.acquire() as conn:
            await conn.execute("CREATE EXTENSION IF NOT EXISTS vector;")
            await register_vector(conn)  # register the `vector` codec with asyncpg
            await conn.execute("""
                CREATE TABLE IF NOT EXISTS memories (
                    id SERIAL PRIMARY KEY,
                    content TEXT NOT NULL,
                    summary TEXT,
                    importance REAL NOT NULL,
                    access_count INT NOT NULL,
                    timestamp DOUBLE PRECISION NOT NULL,
                    embedding vector(1536) NOT NULL
                );
                CREATE INDEX IF NOT EXISTS idx_memories_embedding ON memories
                USING hnsw (embedding vector_l2_ops);
            """)
    finally:
        await pool.close()
if __name__ == "__main__":
    # One-time setup: (re)create the memory_db database and its schema.
    asyncio.run(initialize_database())
```
## /examples/fastmcp/parameter_descriptions.py
```py path="/examples/fastmcp/parameter_descriptions.py"
"""
FastMCP Example showing parameter descriptions
"""
from pydantic import Field
from mcp.server.fastmcp import FastMCP
# Create server
mcp = FastMCP("Parameter Descriptions Server")
@mcp.tool()
def greet_user(
    name: str = Field(description="The name of the person to greet"),
    title: str = Field(description="Optional title like Mr/Ms/Dr", default=""),
    times: int = Field(description="Number of times to repeat the greeting", default=1),
) -> str:
    """Greet a user with optional title and repetition"""
    prefix = f"{title} " if title else ""
    line = f"Hello {prefix}{name}!"
    return "\n".join([line] * times)
```
## /examples/fastmcp/readme-quickstart.py
```py path="/examples/fastmcp/readme-quickstart.py"
from mcp.server.fastmcp import FastMCP
# Create an MCP server
mcp = FastMCP("Demo")
# Add an addition tool
@mcp.tool()
def sum(a: int, b: int) -> int:
    """Add two numbers"""
    total = a + b
    return total
# Add a dynamic greeting resource
@mcp.resource("greeting://{name}")
def get_greeting(name: str) -> str:
    """Get a personalized greeting"""
    return "Hello, " + name + "!"
```
## /examples/fastmcp/screenshot.py
```py path="/examples/fastmcp/screenshot.py"
"""
FastMCP Screenshot Example
Give Claude a tool to capture and view screenshots.
"""
import io
from mcp.server.fastmcp import FastMCP
from mcp.server.fastmcp.utilities.types import Image
# Create server
mcp = FastMCP("Screenshot Demo", dependencies=["pyautogui", "Pillow"])
@mcp.tool()
def take_screenshot() -> Image:
    """
    Take a screenshot of the user's screen and return it as an image. Use
    this tool anytime the user wants you to look at something they're doing.
    """
    import pyautogui

    # JPEG at quality 60 keeps the payload small; if the file exceeds
    # ~1MB, it will be rejected by Claude.
    shot = pyautogui.screenshot()
    out = io.BytesIO()
    shot.convert("RGB").save(out, format="JPEG", quality=60, optimize=True)
    return Image(data=out.getvalue(), format="jpeg")
```
## /examples/fastmcp/simple_echo.py
```py path="/examples/fastmcp/simple_echo.py"
"""
FastMCP Echo Server
"""
from mcp.server.fastmcp import FastMCP
# Create server
mcp = FastMCP("Echo Server")
@mcp.tool()
def echo(text: str) -> str:
    """Echo the input text"""
    # Identity transform: the response is exactly the request.
    result = text
    return result
```
## /examples/fastmcp/text_me.py
```py path="/examples/fastmcp/text_me.py"
# /// script
# dependencies = []
# ///
"""
FastMCP Text Me Server
--------------------------------
This defines a simple FastMCP server that sends a text message to a phone number via https://surgemsg.com/.
To run this example, create a `.env` file with the following values:
SURGE_API_KEY=...
SURGE_ACCOUNT_ID=...
SURGE_MY_PHONE_NUMBER=...
SURGE_MY_FIRST_NAME=...
SURGE_MY_LAST_NAME=...
Visit https://surgemsg.com/ and click "Get Started" to obtain these values.
"""
from typing import Annotated
import httpx
from pydantic import BeforeValidator
from pydantic_settings import BaseSettings, SettingsConfigDict
from mcp.server.fastmcp import FastMCP
class SurgeSettings(BaseSettings):
    """Configuration for the Surge SMS API, loaded from SURGE_* env vars / .env."""

    model_config: SettingsConfigDict = SettingsConfigDict(env_prefix="SURGE_", env_file=".env")

    # Bearer token sent in the Authorization header
    api_key: str
    # Sent as the "Surge-Account" request header
    account_id: str
    # Normalized on load: a "+" prefix is added when missing
    my_phone_number: Annotated[str, BeforeValidator(lambda v: "+" + v if not v.startswith("+") else v)]
    my_first_name: str
    my_last_name: str
# Create server
mcp = FastMCP("Text me")
surge_settings = SurgeSettings() # type: ignore
@mcp.tool(name="textme", description="Send a text message to me")
def text_me(text_content: str) -> str:
    """Send a text message to a phone number via https://surgemsg.com/"""
    request_headers = {
        "Authorization": f"Bearer {surge_settings.api_key}",
        "Surge-Account": surge_settings.account_id,
        "Content-Type": "application/json",
    }
    request_body = {
        "body": text_content,
        "conversation": {
            "contact": {
                "first_name": surge_settings.my_first_name,
                "last_name": surge_settings.my_last_name,
                "phone_number": surge_settings.my_phone_number,
            }
        },
    }
    with httpx.Client() as client:
        response = client.post(
            "https://api.surgemsg.com/messages",
            headers=request_headers,
            json=request_body,
        )
        response.raise_for_status()
    return f"Message sent: {text_content}"
```
## /examples/fastmcp/unicode_example.py
```py path="/examples/fastmcp/unicode_example.py"
"""
Example FastMCP server that uses Unicode characters in various places to help test
Unicode handling in tools and inspectors.
"""
from mcp.server.fastmcp import FastMCP
mcp = FastMCP()
@mcp.tool(description="🌟 A tool that uses various Unicode characters in its description: á é í ó ú ñ 漢字 🎉")
def hello_unicode(name: str = "世界", greeting: str = "¡Hola") -> str:
    """
    A simple tool that demonstrates Unicode handling in:
    - Tool description (emojis, accents, CJK characters)
    - Parameter defaults (CJK characters)
    - Return values (Spanish punctuation, emojis)
    """
    message = greeting + ", " + name + "! 👋"
    return message
@mcp.tool(description="🎨 Tool that returns a list of emoji categories")
def list_emoji_categories() -> list[str]:
    """Returns a list of emoji categories with emoji examples."""
    categories = (
        "😀 Smileys & Emotion",
        "👋 People & Body",
        "🐶 Animals & Nature",
        "🍎 Food & Drink",
        "⚽ Activities",
        "🌍 Travel & Places",
        "💡 Objects",
        "❤️ Symbols",
        "🚩 Flags",
    )
    return list(categories)
@mcp.tool(description="🔤 Tool that returns text in different scripts")
def multilingual_hello() -> str:
    """Returns hello in different scripts and writing systems."""
    greetings = [
        "English: Hello!",
        "Spanish: ¡Hola!",
        "French: Bonjour!",
        "German: Grüß Gott!",
        "Russian: Привет!",
        "Greek: Γεια σας!",
        "Hebrew: !שָׁלוֹם",
        "Arabic: !مرحبا",
        "Hindi: नमस्ते!",
        "Chinese: 你好!",
        "Japanese: こんにちは!",
        "Korean: 안녕하세요!",
        "Thai: สวัสดี!",
    ]
    return "\n".join(greetings)
if __name__ == "__main__":
    # Start the FastMCP server.
    mcp.run()
```
## /examples/fastmcp/weather_structured.py
```py path="/examples/fastmcp/weather_structured.py"
"""
FastMCP Weather Example with Structured Output
Demonstrates how to use structured output with tools to return
well-typed, validated data that clients can easily process.
"""
import asyncio
import json
import sys
from dataclasses import dataclass
from datetime import datetime
from typing import TypedDict
from pydantic import BaseModel, Field
from mcp.server.fastmcp import FastMCP
from mcp.shared.memory import create_connected_server_and_client_session as client_session
# Create server
mcp = FastMCP("Weather Service")
# Example 1: Using a Pydantic model for structured output
class WeatherData(BaseModel):
    """Structured weather data response"""

    temperature: float = Field(description="Temperature in Celsius")
    humidity: float = Field(description="Humidity percentage (0-100)")
    condition: str = Field(description="Weather condition (sunny, cloudy, rainy, etc.)")
    wind_speed: float = Field(description="Wind speed in km/h")
    location: str = Field(description="Location name")
    # default_factory stamps the observation at model-creation time.
    timestamp: datetime = Field(default_factory=datetime.now, description="Observation time")
@mcp.tool()
def get_weather(city: str) -> WeatherData:
    """Get current weather for a city with full structured data"""
    # In a real implementation, this would fetch from a weather API
    observation = WeatherData(
        temperature=22.5,
        humidity=65.0,
        condition="partly cloudy",
        wind_speed=12.3,
        location=city,
    )
    return observation
# Example 2: Using TypedDict for a simpler structure
class WeatherSummary(TypedDict):
    """Simple weather summary"""

    city: str  # location name
    temp_c: float  # temperature in Celsius
    description: str  # human-readable conditions
@mcp.tool()
def get_weather_summary(city: str) -> WeatherSummary:
    """Get a brief weather summary for a city"""
    summary: WeatherSummary = {
        "city": city,
        "temp_c": 22.5,
        "description": "Partly cloudy with light breeze",
    }
    return summary
# Example 3: Using dict[str, Any] for flexible schemas
@mcp.tool()
def get_weather_metrics(cities: list[str]) -> dict[str, dict[str, float]]:
"""Get weather metrics for multiple cities
Returns a dictionary mapping city names to their metrics
"""
# Returns nested dictionaries with weather metrics
return {
city: {"temperature": 20.0 + i * 2, "humidity": 60.0 + i * 5, "pressure": 1013.0 + i * 0.5}
for i, city in enumerate(cities)
}
# Example 4: Using dataclass for weather alerts
@dataclass
class WeatherAlert:
    """Weather alert information"""

    severity: str  # "low", "medium", "high"
    title: str  # short headline
    description: str  # human-readable detail
    affected_areas: list[str]  # region names covered by the alert
    valid_until: datetime  # when the alert expires
@mcp.tool()
def get_weather_alerts(region: str) -> list[WeatherAlert]:
    """Get active weather alerts for a region"""
    # In production, this would fetch real alerts
    if region.lower() != "california":
        return []
    heat_wave = WeatherAlert(
        severity="high",
        title="Heat Wave Warning",
        description="Temperatures expected to exceed 40 degrees",
        affected_areas=["Los Angeles", "San Diego", "Riverside"],
        valid_until=datetime(2024, 7, 15, 18, 0),
    )
    air_quality = WeatherAlert(
        severity="medium",
        title="Air Quality Advisory",
        description="Poor air quality due to wildfire smoke",
        affected_areas=["San Francisco Bay Area"],
        valid_until=datetime(2024, 7, 14, 12, 0),
    )
    return [heat_wave, air_quality]
# Example 5: Returning primitives with structured output
@mcp.tool()
def get_temperature(city: str, unit: str = "celsius") -> float:
"""Get just the temperature for a city
When returning primitives as structured output,
the result is wrapped in {"result": value}
"""
base_temp = 22.5
if unit.lower() == "fahrenheit":
return base_temp * 9 / 5 + 32
return base_temp
# Example 6: Weather statistics with nested models
class DailyStats(BaseModel):
    """Statistics for a single day"""

    high: float  # maximum value observed
    low: float  # minimum value observed
    mean: float  # arithmetic mean over the day
class WeatherStats(BaseModel):
    """Weather statistics over a period"""

    location: str  # location name
    period_days: int  # length of the reporting window in days
    temperature: DailyStats  # aggregated temperature statistics
    humidity: DailyStats  # aggregated humidity statistics
    precipitation_mm: float = Field(description="Total precipitation in millimeters")
@mcp.tool()
def get_weather_stats(city: str, days: int = 7) -> WeatherStats:
    """Get weather statistics for the past N days"""
    temperature_stats = DailyStats(high=28.5, low=15.2, mean=21.8)
    humidity_stats = DailyStats(high=85.0, low=45.0, mean=65.0)
    return WeatherStats(
        location=city,
        period_days=days,
        temperature=temperature_stats,
        humidity=humidity_stats,
        precipitation_mm=12.4,
    )
if __name__ == "__main__":

    async def test() -> None:
        """Test the tools by calling them through the server as a client would"""
        print("Testing Weather Service Tools (via MCP protocol)\n")
        print("=" * 80)

        # In-memory client/server pair; no transport or network involved.
        async with client_session(mcp._mcp_server) as client:
            # Test get_weather
            result = await client.call_tool("get_weather", {"city": "London"})
            print("\nWeather in London:")
            print(json.dumps(result.structuredContent, indent=2))

            # Test get_weather_summary
            result = await client.call_tool("get_weather_summary", {"city": "Paris"})
            print("\nWeather summary for Paris:")
            print(json.dumps(result.structuredContent, indent=2))

            # Test get_weather_metrics
            result = await client.call_tool("get_weather_metrics", {"cities": ["Tokyo", "Sydney", "Mumbai"]})
            print("\nWeather metrics:")
            print(json.dumps(result.structuredContent, indent=2))

            # Test get_weather_alerts
            result = await client.call_tool("get_weather_alerts", {"region": "California"})
            print("\nWeather alerts for California:")
            print(json.dumps(result.structuredContent, indent=2))

            # Test get_temperature
            result = await client.call_tool("get_temperature", {"city": "Berlin", "unit": "fahrenheit"})
            print("\nTemperature in Berlin:")
            print(json.dumps(result.structuredContent, indent=2))

            # Test get_weather_stats
            result = await client.call_tool("get_weather_stats", {"city": "Seattle", "days": 30})
            print("\nWeather stats for Seattle (30 days):")
            print(json.dumps(result.structuredContent, indent=2))

            # Also show the text content for comparison
            print("\nText content for last result:")
            for content in result.content:
                if content.type == "text":
                    print(content.text)

    async def print_schemas() -> None:
        """Print all tool schemas"""
        print("Tool Schemas for Weather Service\n")
        print("=" * 80)

        tools = await mcp.list_tools()
        for tool in tools:
            print(f"\nTool: {tool.name}")
            print(f"Description: {tool.description}")
            print("Input Schema:")
            print(json.dumps(tool.inputSchema, indent=2))
            if tool.outputSchema:
                print("Output Schema:")
                print(json.dumps(tool.outputSchema, indent=2))
            else:
                print("Output Schema: None (returns unstructured content)")
            print("-" * 80)

    # Check command line arguments: --schemas dumps schemas, otherwise run tests.
    if len(sys.argv) > 1 and sys.argv[1] == "--schemas":
        asyncio.run(print_schemas())
    else:
        print("Usage:")
        print(" python weather_structured.py # Run tool tests")
        print(" python weather_structured.py --schemas # Print tool schemas")
        print()
        asyncio.run(test())
```
## /examples/servers/simple-auth/README.md
# MCP OAuth Authentication Demo
This example demonstrates OAuth 2.0 authentication with the Model Context Protocol using a **separate Authorization Server (AS) and Resource Server (RS)** to comply with the new RFC 9728 specification.
---
## Running the Servers
### Step 1: Start Authorization Server
```bash
# Navigate to the simple-auth directory
cd examples/servers/simple-auth
# Start Authorization Server on port 9000
uv run mcp-simple-auth-as --port=9000
```
**What it provides:**
- OAuth 2.0 flows (registration, authorization, token exchange)
- Simple credential-based authentication (no external provider needed)
- Token introspection endpoint for Resource Servers (`/introspect`)
---
### Step 2: Start Resource Server (MCP Server)
```bash
# In another terminal, navigate to the simple-auth directory
cd examples/servers/simple-auth
# Start Resource Server on port 8001, connected to Authorization Server
uv run mcp-simple-auth-rs --port=8001 --auth-server=http://localhost:9000 --transport=streamable-http
# With RFC 8707 strict resource validation (recommended for production)
uv run mcp-simple-auth-rs --port=8001 --auth-server=http://localhost:9000 --transport=streamable-http --oauth-strict
```
### Step 3: Test with Client
```bash
cd examples/clients/simple-auth-client
# Start client with streamable HTTP
MCP_SERVER_PORT=8001 MCP_TRANSPORT_TYPE=streamable-http uv run mcp-simple-auth-client
```
## How It Works
### RFC 9728 Discovery
**Client → Resource Server:**
```bash
curl http://localhost:8001/.well-known/oauth-protected-resource
```
```json
{
"resource": "http://localhost:8001",
"authorization_servers": ["http://localhost:9000"]
}
```
**Client → Authorization Server:**
```bash
curl http://localhost:9000/.well-known/oauth-authorization-server
```
```json
{
"issuer": "http://localhost:9000",
"authorization_endpoint": "http://localhost:9000/authorize",
"token_endpoint": "http://localhost:9000/token"
}
```
## Legacy MCP Server as Authorization Server (Backwards Compatibility)
For backwards compatibility with older MCP implementations, a legacy server is provided that acts as an Authorization Server (following the old spec where MCP servers could optionally provide OAuth):
### Running the Legacy Server
```bash
# Start legacy authorization server on port 8002
uv run mcp-simple-auth-legacy --port=8002
```
**Differences from the new architecture:**
- **MCP server acts as AS:** The MCP server itself provides OAuth endpoints (old spec behavior)
- **No separate RS:** The server handles both authentication and MCP tools
- **Local token validation:** Tokens are validated internally without introspection
- **No RFC 9728 support:** Does not provide `/.well-known/oauth-protected-resource`
- **Direct OAuth discovery:** OAuth metadata is at the MCP server's URL
### Testing with Legacy Server
```bash
# Test with client (will automatically fall back to legacy discovery)
cd examples/clients/simple-auth-client
MCP_SERVER_PORT=8002 MCP_TRANSPORT_TYPE=streamable-http uv run mcp-simple-auth-client
```
The client will:
1. Try RFC 9728 discovery at `/.well-known/oauth-protected-resource` (404 on legacy server)
2. Fall back to direct OAuth discovery at `/.well-known/oauth-authorization-server`
3. Complete authentication with the MCP server acting as its own AS
This ensures existing MCP servers (which could optionally act as Authorization Servers under the old spec) continue to work while the ecosystem transitions to the new architecture where MCP servers are Resource Servers only.
## Manual Testing
### Test Discovery
```bash
# Test Resource Server discovery endpoint (new architecture)
curl -v http://localhost:8001/.well-known/oauth-protected-resource
# Test Authorization Server metadata
curl -v http://localhost:9000/.well-known/oauth-authorization-server
```
### Test Token Introspection
```bash
# After getting a token through OAuth flow:
curl -X POST http://localhost:9000/introspect \
-H "Content-Type: application/x-www-form-urlencoded" \
-d "token=your_access_token"
```
## /examples/servers/simple-auth/mcp_simple_auth/__init__.py
```py path="/examples/servers/simple-auth/mcp_simple_auth/__init__.py"
"""Simple MCP server with GitHub OAuth authentication."""
```
## /examples/servers/simple-auth/mcp_simple_auth/__main__.py
```py path="/examples/servers/simple-auth/mcp_simple_auth/__main__.py"
"""Main entry point for simple MCP server with GitHub OAuth authentication."""
import sys
from mcp_simple_auth.server import main
sys.exit(main()) # type: ignore[call-arg]
```
## /examples/servers/simple-auth/mcp_simple_auth/auth_server.py
```py path="/examples/servers/simple-auth/mcp_simple_auth/auth_server.py"
"""
Authorization Server for MCP Split Demo.
This server handles OAuth flows, client registration, and token issuance.
Can be replaced with enterprise authorization servers like Auth0, Entra ID, etc.
NOTE: this is a simplified example for demonstration purposes.
This is not a production-ready implementation.
"""
import asyncio
import logging
import time
import click
from pydantic import AnyHttpUrl, BaseModel
from starlette.applications import Starlette
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from starlette.routing import Route
from uvicorn import Config, Server
from mcp.server.auth.routes import cors_middleware, create_auth_routes
from mcp.server.auth.settings import AuthSettings, ClientRegistrationOptions
from .simple_auth_provider import SimpleAuthSettings, SimpleOAuthProvider
logger = logging.getLogger(__name__)
class AuthServerSettings(BaseModel):
    """Settings for the Authorization Server."""

    # Server settings
    host: str = "localhost"
    port: int = 9000
    server_url: AnyHttpUrl = AnyHttpUrl("http://localhost:9000")
    # NOTE(review): despite the "_path" name, this holds the full callback
    # URL (see main(), which passes f"{server_url}/login") — confirm before renaming.
    auth_callback_path: str = "http://localhost:9000/login/callback"
class SimpleAuthProvider(SimpleOAuthProvider):
    """
    Authorization Server provider with simple demo authentication.

    This provider:
    1. Issues MCP tokens after simple credential authentication
    2. Stores token state for introspection by Resource Servers

    The previous pass-through __init__ merely forwarded its arguments to
    SimpleOAuthProvider unchanged, so it was removed; Python uses the
    base-class constructor automatically.
    """
def create_authorization_server(server_settings: AuthServerSettings, auth_settings: SimpleAuthSettings) -> Starlette:
    """Create the Authorization Server application.

    Assembles the standard MCP OAuth routes plus three demo-specific
    endpoints: a login page (GET /login), a login callback
    (POST /login/callback), and an RFC 7662 token introspection endpoint
    (POST /introspect) for Resource Servers.
    """
    oauth_provider = SimpleAuthProvider(
        auth_settings, server_settings.auth_callback_path, str(server_settings.server_url)
    )

    mcp_auth_settings = AuthSettings(
        issuer_url=server_settings.server_url,
        client_registration_options=ClientRegistrationOptions(
            enabled=True,
            valid_scopes=[auth_settings.mcp_scope],
            default_scopes=[auth_settings.mcp_scope],
        ),
        required_scopes=[auth_settings.mcp_scope],
        # AS-only deployment: no resource server URL to advertise here.
        resource_server_url=None,
    )

    # Create OAuth routes
    routes = create_auth_routes(
        provider=oauth_provider,
        issuer_url=mcp_auth_settings.issuer_url,
        service_documentation_url=mcp_auth_settings.service_documentation_url,
        client_registration_options=mcp_auth_settings.client_registration_options,
        revocation_options=mcp_auth_settings.revocation_options,
    )

    # Add login page route (GET)
    async def login_page_handler(request: Request) -> Response:
        """Show login form."""
        state = request.query_params.get("state")
        if not state:
            # state ties the login back to the pending authorization request
            raise HTTPException(400, "Missing state parameter")
        return await oauth_provider.get_login_page(state)

    routes.append(Route("/login", endpoint=login_page_handler, methods=["GET"]))

    # Add login callback route (POST)
    async def login_callback_handler(request: Request) -> Response:
        """Handle simple authentication callback."""
        return await oauth_provider.handle_login_callback(request)

    routes.append(Route("/login/callback", endpoint=login_callback_handler, methods=["POST"]))

    # Add token introspection endpoint (RFC 7662) for Resource Servers
    async def introspect_handler(request: Request) -> Response:
        """
        Token introspection endpoint for Resource Servers.

        Resource Servers call this endpoint to validate tokens without
        needing direct access to token storage.
        """
        form = await request.form()
        token = form.get("token")
        if not token or not isinstance(token, str):
            return JSONResponse({"active": False}, status_code=400)

        # Look up token in provider; unknown tokens are simply "inactive"
        access_token = await oauth_provider.load_access_token(token)
        if not access_token:
            return JSONResponse({"active": False})

        return JSONResponse(
            {
                "active": True,
                "client_id": access_token.client_id,
                "scope": " ".join(access_token.scopes),
                "exp": access_token.expires_at,
                "iat": int(time.time()),
                "token_type": "Bearer",
                "aud": access_token.resource,  # RFC 8707 audience claim
            }
        )

    routes.append(
        Route(
            "/introspect",
            # CORS-wrapped so browser-based RS tooling can call it.
            endpoint=cors_middleware(introspect_handler, ["POST", "OPTIONS"]),
            methods=["POST", "OPTIONS"],
        )
    )

    return Starlette(routes=routes)
async def run_server(server_settings: AuthServerSettings, auth_settings: SimpleAuthSettings):
    """Build the Authorization Server app and serve it with uvicorn until shutdown."""
    app = create_authorization_server(server_settings, auth_settings)

    uvicorn_config = Config(
        app,
        host=server_settings.host,
        port=server_settings.port,
        log_level="info",
    )
    uvicorn_server = Server(uvicorn_config)

    logger.info(f"🚀 MCP Authorization Server running on {server_settings.server_url}")

    await uvicorn_server.serve()
@click.command()
@click.option("--port", default=9000, help="Port to listen on")
def main(port: int) -> int:
    """
    Run the MCP Authorization Server.

    This server handles OAuth flows and can be used by multiple Resource Servers.

    Uses simple hardcoded credentials for demo purposes.
    """
    logging.basicConfig(level=logging.INFO)

    # Load simple auth settings
    auth_settings = SimpleAuthSettings()

    # Derive the server's base URL from the chosen port.
    base_url = f"http://localhost:{port}"
    server_settings = AuthServerSettings(
        host="localhost",
        port=port,
        server_url=AnyHttpUrl(base_url),
        auth_callback_path=f"{base_url}/login",
    )

    asyncio.run(run_server(server_settings, auth_settings))
    return 0
if __name__ == "__main__":
    # click supplies CLI arguments at runtime, hence the ignore.
    main()  # type: ignore[call-arg]
```
## /examples/servers/simple-auth/mcp_simple_auth/legacy_as_server.py
```py path="/examples/servers/simple-auth/mcp_simple_auth/legacy_as_server.py"
"""
Legacy Combined Authorization Server + Resource Server for MCP.
This server implements the old spec where MCP servers could act as both AS and RS.
Used for backwards compatibility testing with the new split AS/RS architecture.
NOTE: this is a simplified example for demonstration purposes.
This is not a production-ready implementation.
"""
import datetime
import logging
from typing import Any, Literal
import click
from pydantic import AnyHttpUrl, BaseModel
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import Response
from mcp.server.auth.settings import AuthSettings, ClientRegistrationOptions
from mcp.server.fastmcp.server import FastMCP
from .simple_auth_provider import SimpleAuthSettings, SimpleOAuthProvider
logger = logging.getLogger(__name__)
class ServerSettings(BaseModel):
    """Settings for the simple auth MCP server."""

    # Server settings
    host: str = "localhost"
    port: int = 8000
    server_url: AnyHttpUrl = AnyHttpUrl("http://localhost:8000")
    # NOTE(review): despite the "_path" name, this holds the full callback
    # URL (main() passes f"{server_url}/login") — confirm before renaming.
    auth_callback_path: str = "http://localhost:8000/login/callback"
class LegacySimpleOAuthProvider(SimpleOAuthProvider):
    """Simple OAuth provider for legacy MCP server.

    The previous pass-through __init__ merely forwarded its arguments to
    SimpleOAuthProvider unchanged, so it was removed; Python uses the
    base-class constructor automatically.
    """
def create_simple_mcp_server(server_settings: ServerSettings, auth_settings: SimpleAuthSettings) -> FastMCP:
    """Create a simple FastMCP server with simple authentication.

    Legacy (pre-RFC 9728) layout: the MCP server acts as both Authorization
    Server and Resource Server — it registers the OAuth provider, serves the
    demo login routes, and exposes one protected tool.
    """
    oauth_provider = LegacySimpleOAuthProvider(
        auth_settings, server_settings.auth_callback_path, str(server_settings.server_url)
    )

    mcp_auth_settings = AuthSettings(
        issuer_url=server_settings.server_url,
        client_registration_options=ClientRegistrationOptions(
            enabled=True,
            valid_scopes=[auth_settings.mcp_scope],
            default_scopes=[auth_settings.mcp_scope],
        ),
        required_scopes=[auth_settings.mcp_scope],
        # No resource_server_url parameter in legacy mode
        resource_server_url=None,
    )

    app = FastMCP(
        name="Simple Auth MCP Server",
        instructions="A simple MCP server with simple credential authentication",
        auth_server_provider=oauth_provider,
        host=server_settings.host,
        port=server_settings.port,
        debug=True,
        auth=mcp_auth_settings,
    )

    @app.custom_route("/login", methods=["GET"])
    async def login_page_handler(request: Request) -> Response:
        """Show login form."""
        state = request.query_params.get("state")
        if not state:
            raise HTTPException(400, "Missing state parameter")
        return await oauth_provider.get_login_page(state)

    @app.custom_route("/login/callback", methods=["POST"])
    async def login_callback_handler(request: Request) -> Response:
        """Handle simple authentication callback."""
        return await oauth_provider.handle_login_callback(request)

    @app.tool()
    async def get_time() -> dict[str, Any]:
        """
        Get the current server time.

        This tool demonstrates that system information can be protected
        by OAuth authentication. User must be authenticated to access it.
        """
        # Fix: the previous implementation returned naive *local* time from
        # datetime.datetime.now() while labeling it "UTC". Use an aware UTC
        # timestamp so the reported timezone matches the value.
        now = datetime.datetime.now(datetime.timezone.utc)
        return {
            "current_time": now.isoformat(),
            "timezone": "UTC",
            "timestamp": now.timestamp(),
            "formatted": now.strftime("%Y-%m-%d %H:%M:%S"),
        }

    return app
@click.command()
@click.option("--port", default=8000, help="Port to listen on")
@click.option(
    "--transport",
    default="streamable-http",
    type=click.Choice(["sse", "streamable-http"]),
    help="Transport protocol to use ('sse' or 'streamable-http')",
)
def main(port: int, transport: Literal["sse", "streamable-http"]) -> int:
    """Run the simple auth MCP server."""
    logging.basicConfig(level=logging.INFO)

    auth_settings = SimpleAuthSettings()
    # Create server settings
    host = "localhost"
    server_url = f"http://{host}:{port}"
    server_settings = ServerSettings(
        host=host,
        port=port,
        server_url=AnyHttpUrl(server_url),
        # Full callback URL despite the field's "_path" name.
        auth_callback_path=f"{server_url}/login",
    )

    mcp_server = create_simple_mcp_server(server_settings, auth_settings)
    logger.info(f"🚀 MCP Legacy Server running on {server_url}")
    # Blocks here until the server shuts down.
    mcp_server.run(transport=transport)
    return 0
if __name__ == "__main__":
    # click supplies CLI arguments at runtime, hence the ignore.
    main()  # type: ignore[call-arg]
```
## /examples/servers/simple-auth/mcp_simple_auth/server.py
```py path="/examples/servers/simple-auth/mcp_simple_auth/server.py"
"""
MCP Resource Server with Token Introspection.
This server validates tokens via Authorization Server introspection and serves MCP resources.
Demonstrates RFC 9728 Protected Resource Metadata for AS/RS separation.
NOTE: this is a simplified example for demonstration purposes.
This is not a production-ready implementation.
"""
import datetime
import logging
from typing import Any, Literal
import click
from pydantic import AnyHttpUrl
from pydantic_settings import BaseSettings, SettingsConfigDict
from mcp.server.auth.settings import AuthSettings
from mcp.server.fastmcp.server import FastMCP
from .token_verifier import IntrospectionTokenVerifier
logger = logging.getLogger(__name__)
class ResourceServerSettings(BaseSettings):
    """Settings for the MCP Resource Server.

    Each field can be overridden through environment variables prefixed
    with ``MCP_RESOURCE_`` (e.g. ``MCP_RESOURCE_PORT``).
    """

    model_config = SettingsConfigDict(env_prefix="MCP_RESOURCE_")

    # Server settings
    host: str = "localhost"
    port: int = 8001
    # Public URL of this resource server's MCP endpoint.
    server_url: AnyHttpUrl = AnyHttpUrl("http://localhost:8001/mcp")

    # Authorization Server settings
    auth_server_url: AnyHttpUrl = AnyHttpUrl("http://localhost:9000")
    auth_server_introspection_endpoint: str = "http://localhost:9000/introspect"
    # No user endpoint needed - we get user data from token introspection

    # MCP settings
    mcp_scope: str = "user"

    # RFC 8707 resource validation (enabled via --oauth-strict)
    oauth_strict: bool = False
def create_resource_server(settings: ResourceServerSettings) -> FastMCP:
    """
    Create MCP Resource Server with token introspection.

    This server:
    1. Provides protected resource metadata (RFC 9728)
    2. Validates tokens via Authorization Server introspection
    3. Serves MCP tools and resources
    """
    # Create token verifier for introspection with RFC 8707 resource validation
    token_verifier = IntrospectionTokenVerifier(
        introspection_endpoint=settings.auth_server_introspection_endpoint,
        server_url=str(settings.server_url),
        validate_resource=settings.oauth_strict,  # Only validate when --oauth-strict is set
    )

    # Create FastMCP server as a Resource Server
    app = FastMCP(
        name="MCP Resource Server",
        instructions="Resource Server that validates tokens via Authorization Server introspection",
        host=settings.host,
        port=settings.port,
        debug=True,
        # Auth configuration for RS mode
        token_verifier=token_verifier,
        auth=AuthSettings(
            issuer_url=settings.auth_server_url,
            required_scopes=[settings.mcp_scope],
            resource_server_url=settings.server_url,
        ),
    )

    @app.tool()
    async def get_time() -> dict[str, Any]:
        """
        Get the current server time.

        This tool demonstrates that system information can be protected
        by OAuth authentication. User must be authenticated to access it.
        """
        # Fix: the previous implementation returned naive *local* time from
        # datetime.datetime.now() while labeling it "UTC". Use an aware UTC
        # timestamp so the reported timezone matches the value.
        now = datetime.datetime.now(datetime.timezone.utc)
        return {
            "current_time": now.isoformat(),
            "timezone": "UTC",
            "timestamp": now.timestamp(),
            "formatted": now.strftime("%Y-%m-%d %H:%M:%S"),
        }

    return app
@click.command()
@click.option("--port", default=8001, help="Port to listen on")
@click.option("--auth-server", default="http://localhost:9000", help="Authorization Server URL")
@click.option(
    "--transport",
    default="streamable-http",
    type=click.Choice(["sse", "streamable-http"]),
    help="Transport protocol to use ('sse' or 'streamable-http')",
)
@click.option(
    "--oauth-strict",
    is_flag=True,
    help="Enable RFC 8707 resource validation",
)
def main(port: int, auth_server: str, transport: Literal["sse", "streamable-http"], oauth_strict: bool) -> int:
    """
    Run the MCP Resource Server.

    This server:
    - Provides RFC 9728 Protected Resource Metadata
    - Validates tokens via Authorization Server introspection
    - Serves MCP tools requiring authentication

    Must be used with a running Authorization Server.
    """
    logging.basicConfig(level=logging.INFO)

    try:
        # Parse auth server URL; AnyHttpUrl raises ValueError on bad input.
        auth_server_url = AnyHttpUrl(auth_server)

        # Create settings
        host = "localhost"
        server_url = f"http://{host}:{port}/mcp"
        settings = ResourceServerSettings(
            host=host,
            port=port,
            server_url=AnyHttpUrl(server_url),
            auth_server_url=auth_server_url,
            auth_server_introspection_endpoint=f"{auth_server}/introspect",
            oauth_strict=oauth_strict,
        )
    except ValueError as e:
        logger.error(f"Configuration error: {e}")
        logger.error("Make sure to provide a valid Authorization Server URL")
        return 1

    try:
        mcp_server = create_resource_server(settings)
        logger.info(f"🚀 MCP Resource Server running on {settings.server_url}")
        logger.info(f"🔑 Using Authorization Server: {settings.auth_server_url}")
        # Run the server - this should block and keep running
        mcp_server.run(transport=transport)
        logger.info("Server stopped")
        return 0
    except Exception:
        logger.exception("Server error")
        return 1
if __name__ == "__main__":
main() # type: ignore[call-arg]
```
## /examples/servers/simple-auth/mcp_simple_auth/simple_auth_provider.py
```py path="/examples/servers/simple-auth/mcp_simple_auth/simple_auth_provider.py"
"""
Simple OAuth provider for MCP servers.
This module contains a basic OAuth implementation using hardcoded user credentials
for demonstration purposes. No external authentication provider is required.
NOTE: this is a simplified example for demonstration purposes.
This is not a production-ready implementation.
"""
import logging
import secrets
import time
from typing import Any
from pydantic import AnyHttpUrl
from pydantic_settings import BaseSettings, SettingsConfigDict
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import HTMLResponse, RedirectResponse, Response
from mcp.server.auth.provider import (
AccessToken,
AuthorizationCode,
AuthorizationParams,
OAuthAuthorizationServerProvider,
RefreshToken,
construct_redirect_uri,
)
from mcp.shared.auth import OAuthClientInformationFull, OAuthToken
logger = logging.getLogger(__name__)
class SimpleAuthSettings(BaseSettings):
    """Simple OAuth settings for demo purposes.

    Fields can be overridden via environment variables with the ``MCP_``
    prefix (e.g. ``MCP_DEMO_USERNAME``), per pydantic-settings conventions.
    """

    model_config = SettingsConfigDict(env_prefix="MCP_")

    # Demo user credentials accepted by the login form
    demo_username: str = "demo_user"
    demo_password: str = "demo_password"

    # MCP OAuth scope granted to issued tokens
    mcp_scope: str = "user"
class SimpleOAuthProvider(OAuthAuthorizationServerProvider[AuthorizationCode, RefreshToken, AccessToken]):
    """
    Simple OAuth provider for demo purposes.

    This provider handles the OAuth flow by:
    1. Providing a simple login form for demo credentials
    2. Issuing MCP tokens after successful authentication
    3. Maintaining token state for introspection

    All state lives in process memory: restarting the server invalidates every
    registered client, authorization code, and token.
    """

    def __init__(self, settings: SimpleAuthSettings, auth_callback_url: str, server_url: str):
        self.settings = settings
        self.auth_callback_url = auth_callback_url
        self.server_url = server_url
        # client_id -> registered client metadata
        self.clients: dict[str, OAuthClientInformationFull] = {}
        # authorization code string -> pending AuthorizationCode (single use)
        self.auth_codes: dict[str, AuthorizationCode] = {}
        # access token string -> AccessToken record
        self.tokens: dict[str, AccessToken] = {}
        # OAuth "state" -> stringified parameters of the original /authorize request
        self.state_mapping: dict[str, dict[str, str | None]] = {}
        # Store authenticated user information (keyed by username or by token)
        self.user_data: dict[str, dict[str, Any]] = {}

    async def get_client(self, client_id: str) -> OAuthClientInformationFull | None:
        """Get OAuth client information."""
        return self.clients.get(client_id)

    async def register_client(self, client_info: OAuthClientInformationFull):
        """Register a new OAuth client.

        Raises:
            ValueError: if the registration carries no client_id.
        """
        if not client_info.client_id:
            raise ValueError("No client_id provided")
        self.clients[client_info.client_id] = client_info

    async def authorize(self, client: OAuthClientInformationFull, params: AuthorizationParams) -> str:
        """Generate an authorization URL for simple login flow."""
        state = params.state or secrets.token_hex(16)

        # Store state mapping for callback; values are stringified so they can
        # round-trip through the login form post.
        self.state_mapping[state] = {
            "redirect_uri": str(params.redirect_uri),
            "code_challenge": params.code_challenge,
            "redirect_uri_provided_explicitly": str(params.redirect_uri_provided_explicitly),
            "client_id": client.client_id,
            "resource": params.resource,  # RFC 8707
        }

        # Build simple login URL that points to login page
        auth_url = f"{self.auth_callback_url}?state={state}&client_id={client.client_id}"

        return auth_url

    async def get_login_page(self, state: str) -> HTMLResponse:
        """Generate login page HTML for the given state.

        Raises:
            HTTPException: 400 when state is missing.
        """
        if not state:
            raise HTTPException(400, "Missing state parameter")

        # Create simple login form HTML; the demo credentials are pre-filled
        # and the state is carried in a hidden field.
        html_content = f"""
        <!DOCTYPE html>
        <html>
        <head>
            <title>MCP Demo Authentication</title>
            <style>
                body {{ font-family: Arial, sans-serif; max-width: 500px; margin: 0 auto; padding: 20px; }}
                .form-group {{ margin-bottom: 15px; }}
                input {{ width: 100%; padding: 8px; margin-top: 5px; }}
                button {{ background-color: #4CAF50; color: white; padding: 10px 15px; border: none; cursor: pointer; }}
            </style>
        </head>
        <body>
            <h2>MCP Demo Authentication</h2>
            <p>This is a simplified authentication demo. Use the demo credentials below:</p>
            <p><strong>Username:</strong> demo_user<br>
            <strong>Password:</strong> demo_password</p>
            <form action="{self.server_url.rstrip("/")}/login/callback" method="post">
                <input type="hidden" name="state" value="{state}">
                <div class="form-group">
                    <label>Username:</label>
                    <input type="text" name="username" value="demo_user" required>
                </div>
                <div class="form-group">
                    <label>Password:</label>
                    <input type="password" name="password" value="demo_password" required>
                </div>
                <button type="submit">Sign In</button>
            </form>
        </body>
        </html>
        """
        return HTMLResponse(content=html_content)

    async def handle_login_callback(self, request: Request) -> Response:
        """Handle login form submission callback.

        Raises:
            HTTPException: 400 when required form fields are missing or of
                the wrong type.
        """
        form = await request.form()
        username = form.get("username")
        password = form.get("password")
        state = form.get("state")

        if not username or not password or not state:
            raise HTTPException(400, "Missing username, password, or state parameter")

        # Ensure we have strings, not UploadFile objects
        if not isinstance(username, str) or not isinstance(password, str) or not isinstance(state, str):
            raise HTTPException(400, "Invalid parameter types")

        redirect_uri = await self.handle_simple_callback(username, password, state)
        return RedirectResponse(url=redirect_uri, status_code=302)

    async def handle_simple_callback(self, username: str, password: str, state: str) -> str:
        """Handle simple authentication callback and return redirect URI.

        Validates the demo credentials, mints a single-use authorization code,
        and consumes the state entry (so a state cannot be replayed).

        Raises:
            HTTPException: 400 for an unknown state, 401 for bad credentials.
        """
        state_data = self.state_mapping.get(state)
        if not state_data:
            raise HTTPException(400, "Invalid state parameter")

        redirect_uri = state_data["redirect_uri"]
        code_challenge = state_data["code_challenge"]
        # Booleans were stringified in authorize(); compare against "True"
        redirect_uri_provided_explicitly = state_data["redirect_uri_provided_explicitly"] == "True"
        client_id = state_data["client_id"]
        resource = state_data.get("resource")  # RFC 8707

        # These are required values from our own state mapping
        assert redirect_uri is not None
        assert code_challenge is not None
        assert client_id is not None

        # Validate demo credentials
        if username != self.settings.demo_username or password != self.settings.demo_password:
            raise HTTPException(401, "Invalid credentials")

        # Create MCP authorization code (expires in 5 minutes)
        new_code = f"mcp_{secrets.token_hex(16)}"
        auth_code = AuthorizationCode(
            code=new_code,
            client_id=client_id,
            redirect_uri=AnyHttpUrl(redirect_uri),
            redirect_uri_provided_explicitly=redirect_uri_provided_explicitly,
            expires_at=time.time() + 300,
            scopes=[self.settings.mcp_scope],
            code_challenge=code_challenge,
            resource=resource,  # RFC 8707
        )
        self.auth_codes[new_code] = auth_code

        # Store user data
        self.user_data[username] = {
            "username": username,
            "user_id": f"user_{secrets.token_hex(8)}",
            "authenticated_at": time.time(),
        }

        # One-shot state: remove it so the same state cannot be reused
        del self.state_mapping[state]
        return construct_redirect_uri(redirect_uri, code=new_code, state=state)

    async def load_authorization_code(
        self, client: OAuthClientInformationFull, authorization_code: str
    ) -> AuthorizationCode | None:
        """Load an authorization code."""
        return self.auth_codes.get(authorization_code)

    async def exchange_authorization_code(
        self, client: OAuthClientInformationFull, authorization_code: AuthorizationCode
    ) -> OAuthToken:
        """Exchange authorization code for tokens.

        The code is consumed (deleted) on success so it is single use.

        Raises:
            ValueError: for an unknown/expired code or a client without an id.
        """
        if authorization_code.code not in self.auth_codes:
            raise ValueError("Invalid authorization code")

        if not client.client_id:
            raise ValueError("No client_id provided")

        # Generate MCP access token (expires in 1 hour)
        mcp_token = f"mcp_{secrets.token_hex(32)}"

        # Store MCP token
        self.tokens[mcp_token] = AccessToken(
            token=mcp_token,
            client_id=client.client_id,
            scopes=authorization_code.scopes,
            expires_at=int(time.time()) + 3600,
            resource=authorization_code.resource,  # RFC 8707
        )

        # Store user data mapping for this token
        self.user_data[mcp_token] = {
            "username": self.settings.demo_username,
            "user_id": f"user_{secrets.token_hex(8)}",
            "authenticated_at": time.time(),
        }

        del self.auth_codes[authorization_code.code]

        return OAuthToken(
            access_token=mcp_token,
            token_type="Bearer",
            expires_in=3600,
            scope=" ".join(authorization_code.scopes),
        )

    async def load_access_token(self, token: str) -> AccessToken | None:
        """Load and validate an access token.

        Returns None for unknown tokens; expired tokens are evicted from the
        store and also return None.
        """
        access_token = self.tokens.get(token)
        if not access_token:
            return None

        # Check if expired
        if access_token.expires_at and access_token.expires_at < time.time():
            del self.tokens[token]
            return None

        return access_token

    async def load_refresh_token(self, client: OAuthClientInformationFull, refresh_token: str) -> RefreshToken | None:
        """Load a refresh token - not supported in this example."""
        return None

    async def exchange_refresh_token(
        self,
        client: OAuthClientInformationFull,
        refresh_token: RefreshToken,
        scopes: list[str],
    ) -> OAuthToken:
        """Exchange refresh token - not supported in this example."""
        raise NotImplementedError("Refresh tokens not supported")

    # TODO(Marcelo): The type hint is wrong. We need to fix, and test to check if it works.
    async def revoke_token(self, token: str, token_type_hint: str | None = None) -> None:  # type: ignore
        """Revoke a token.

        Revoking an unknown token is a no-op, per RFC 7009 semantics.
        """
        if token in self.tokens:
            del self.tokens[token]
```
## /examples/servers/simple-auth/mcp_simple_auth/token_verifier.py
```py path="/examples/servers/simple-auth/mcp_simple_auth/token_verifier.py"
"""Example token verifier implementation using OAuth 2.0 Token Introspection (RFC 7662)."""
import logging
from typing import Any
from mcp.server.auth.provider import AccessToken, TokenVerifier
from mcp.shared.auth_utils import check_resource_allowed, resource_url_from_server_url
logger = logging.getLogger(__name__)
class IntrospectionTokenVerifier(TokenVerifier):
    """Example token verifier that uses OAuth 2.0 Token Introspection (RFC 7662).

    This is a simple example implementation for demonstration purposes.
    Production implementations should consider:
    - Connection pooling and reuse
    - More sophisticated error handling
    - Rate limiting and retry logic
    - Comprehensive configuration options
    """

    def __init__(
        self,
        introspection_endpoint: str,
        server_url: str,
        validate_resource: bool = False,
    ):
        # RFC 7662 introspection endpoint on the Authorization Server
        self.introspection_endpoint = introspection_endpoint
        self.server_url = server_url
        # When True, enforce RFC 8707 audience binding on introspected tokens
        self.validate_resource = validate_resource
        # Canonical resource identifier derived from the server URL
        self.resource_url = resource_url_from_server_url(server_url)

    async def verify_token(self, token: str) -> AccessToken | None:
        """Verify token via introspection endpoint.

        Returns the AccessToken on success, or None for any failure
        (unsafe endpoint, non-200 response, inactive token, failed
        resource validation, or a network/parse error).
        """
        # Local import: httpx is only needed when a token is actually verified
        import httpx

        # Validate URL to prevent SSRF attacks
        if not self.introspection_endpoint.startswith(("https://", "http://localhost", "http://127.0.0.1")):
            logger.warning(f"Rejecting introspection endpoint with unsafe scheme: {self.introspection_endpoint}")
            return None

        # Configure secure HTTP client
        timeout = httpx.Timeout(10.0, connect=5.0)
        limits = httpx.Limits(max_connections=10, max_keepalive_connections=5)

        async with httpx.AsyncClient(
            timeout=timeout,
            limits=limits,
            verify=True,  # Enforce SSL verification
        ) as client:
            try:
                response = await client.post(
                    self.introspection_endpoint,
                    data={"token": token},
                    headers={"Content-Type": "application/x-www-form-urlencoded"},
                )

                if response.status_code != 200:
                    logger.debug(f"Token introspection returned status {response.status_code}")
                    return None

                data = response.json()
                # RFC 7662: "active": false means the token is revoked/expired/unknown
                if not data.get("active", False):
                    return None

                # RFC 8707 resource validation (only when --oauth-strict is set)
                if self.validate_resource and not self._validate_resource(data):
                    logger.warning(f"Token resource validation failed. Expected: {self.resource_url}")
                    return None

                return AccessToken(
                    token=token,
                    client_id=data.get("client_id", "unknown"),
                    # "scope" is a space-separated string per RFC 7662
                    scopes=data.get("scope", "").split() if data.get("scope") else [],
                    expires_at=data.get("exp"),
                    resource=data.get("aud"),  # Include resource in token
                )
            except Exception as e:
                # Best-effort verifier: treat any transport/parse failure as invalid
                logger.warning(f"Token introspection failed: {e}")
                return None

    def _validate_resource(self, token_data: dict[str, Any]) -> bool:
        """Validate token was issued for this resource server.

        Checks the introspection response's "aud" claim (string or list)
        against this server's resource URL.
        """
        if not self.server_url or not self.resource_url:
            return False  # Fail if strict validation requested but URLs missing

        # Check 'aud' claim first (standard JWT audience); may be str or list
        aud: list[str] | str | None = token_data.get("aud")
        if isinstance(aud, list):
            for audience in aud:
                if self._is_valid_resource(audience):
                    return True
            return False
        elif aud:
            return self._is_valid_resource(aud)

        # No resource binding - invalid per RFC 8707
        return False

    def _is_valid_resource(self, resource: str) -> bool:
        """Check if resource matches this server using hierarchical matching."""
        if not self.resource_url:
            return False

        return check_resource_allowed(requested_resource=self.resource_url, configured_resource=resource)
```
## /examples/servers/simple-auth/pyproject.toml
```toml path="/examples/servers/simple-auth/pyproject.toml"
[project]
name = "mcp-simple-auth"
version = "0.1.0"
description = "A simple MCP server demonstrating OAuth authentication"
readme = "README.md"
requires-python = ">=3.10"
authors = [{ name = "Anthropic, PBC." }]
license = { text = "MIT" }
dependencies = [
"anyio>=4.5",
"click>=8.2.0",
"httpx>=0.27",
"mcp",
"pydantic>=2.0",
"pydantic-settings>=2.5.2",
"sse-starlette>=1.6.1",
"uvicorn>=0.23.1; sys_platform != 'emscripten'",
]
[project.scripts]
mcp-simple-auth-rs = "mcp_simple_auth.server:main"
mcp-simple-auth-as = "mcp_simple_auth.auth_server:main"
mcp-simple-auth-legacy = "mcp_simple_auth.legacy_as_server:main"
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.hatch.build.targets.wheel]
packages = ["mcp_simple_auth"]
[dependency-groups]
dev = ["pyright>=1.1.391", "pytest>=8.3.4", "ruff>=0.8.5"]
```
## /examples/servers/simple-pagination/README.md
# MCP Simple Pagination
A simple MCP server demonstrating pagination for tools, resources, and prompts using cursor-based pagination.
## Usage
Start the server using either stdio (default) or SSE transport:
```bash
# Using stdio transport (default)
uv run mcp-simple-pagination
# Using SSE transport on custom port
uv run mcp-simple-pagination --transport sse --port 8000
```
The server exposes:
- 25 tools (paginated, 5 per page)
- 30 resources (paginated, 10 per page)
- 20 prompts (paginated, 7 per page)
Each paginated list returns a `nextCursor` when more pages are available. Use this cursor in subsequent requests to retrieve the next page.
## Example
Using the MCP client, you can retrieve paginated items like this using the STDIO transport:
```python
import asyncio
from mcp.client.session import ClientSession
from mcp.client.stdio import StdioServerParameters, stdio_client
async def main():
async with stdio_client(
StdioServerParameters(command="uv", args=["run", "mcp-simple-pagination"])
) as (read, write):
async with ClientSession(read, write) as session:
await session.initialize()
# Get first page of tools
tools_page1 = await session.list_tools()
print(f"First page: {len(tools_page1.tools)} tools")
print(f"Next cursor: {tools_page1.nextCursor}")
# Get second page using cursor
if tools_page1.nextCursor:
tools_page2 = await session.list_tools(cursor=tools_page1.nextCursor)
print(f"Second page: {len(tools_page2.tools)} tools")
# Similarly for resources
resources_page1 = await session.list_resources()
print(f"First page: {len(resources_page1.resources)} resources")
# And for prompts
prompts_page1 = await session.list_prompts()
print(f"First page: {len(prompts_page1.prompts)} prompts")
asyncio.run(main())
```
## Pagination Details
The server uses simple numeric indices as cursors for demonstration purposes. In production scenarios, you might use:
- Database offsets or row IDs
- Timestamps for time-based pagination
- Opaque tokens encoding pagination state
The pagination implementation demonstrates:
- Handling `None` cursor for the first page
- Returning `nextCursor` when more data exists
- Gracefully handling invalid cursors
- Different page sizes for different resource types
## /examples/servers/simple-pagination/mcp_simple_pagination/__init__.py
```py path="/examples/servers/simple-pagination/mcp_simple_pagination/__init__.py"
```
## /examples/servers/simple-pagination/mcp_simple_pagination/__main__.py
```py path="/examples/servers/simple-pagination/mcp_simple_pagination/__main__.py"
import sys

from .server import main

# Delegate to the click entry point; its return value becomes the exit status.
sys.exit(main())  # type: ignore[call-arg]
```
## /examples/servers/simple-pagination/mcp_simple_pagination/server.py
```py path="/examples/servers/simple-pagination/mcp_simple_pagination/server.py"
"""
Simple MCP server demonstrating pagination for tools, resources, and prompts.
This example shows how to use the paginated decorators to handle large lists
of items that need to be split across multiple pages.
"""
from typing import Any
import anyio
import click
import mcp.types as types
from mcp.server.lowlevel import Server
from pydantic import AnyUrl
from starlette.requests import Request
# Sample data - in real scenarios, this might come from a database

# 25 demo tools (tool_1 .. tool_25), each taking a single string "input".
SAMPLE_TOOLS = [
    types.Tool(
        name=f"tool_{i}",
        title=f"Tool {i}",
        description=f"This is sample tool number {i}",
        inputSchema={"type": "object", "properties": {"input": {"type": "string"}}},
    )
    for i in range(1, 26)  # 25 tools total
]

# 30 demo file resources (resource_1 .. resource_30).
SAMPLE_RESOURCES = [
    types.Resource(
        uri=AnyUrl(f"file:///path/to/resource_{i}.txt"),
        name=f"resource_{i}",
        description=f"This is sample resource number {i}",
    )
    for i in range(1, 31)  # 30 resources total
]

# 20 demo prompts (prompt_1 .. prompt_20), each with one required argument.
SAMPLE_PROMPTS = [
    types.Prompt(
        name=f"prompt_{i}",
        description=f"This is sample prompt number {i}",
        arguments=[
            types.PromptArgument(name="arg1", description="First argument", required=True),
        ],
    )
    for i in range(1, 21)  # 20 prompts total
]
def _paginate(items: list[Any], cursor: str | None, page_size: int) -> tuple[list[Any], str | None] | None:
    """Return one page of *items* plus the next cursor, or None for an invalid cursor.

    Cursors are stringified start indices; ``None`` means "first page".
    A cursor that is not a non-negative integer is treated as invalid
    (negative values would silently slice from the end of the list).
    """
    if cursor is None:
        start_idx = 0
    else:
        try:
            start_idx = int(cursor)
        except (ValueError, TypeError):
            return None
        if start_idx < 0:
            return None

    page = items[start_idx : start_idx + page_size]
    # Emit a nextCursor only when more items remain after this page.
    next_cursor = str(start_idx + page_size) if start_idx + page_size < len(items) else None
    return page, next_cursor


@click.command()
@click.option("--port", default=8000, help="Port to listen on for SSE")
@click.option(
    "--transport",
    type=click.Choice(["stdio", "sse"]),
    default="stdio",
    help="Transport type",
)
def main(port: int, transport: str) -> int:
    """Run the pagination demo server over stdio (default) or SSE.

    Registers paginated list handlers for tools (5/page), resources
    (10/page) and prompts (7/page), plus simple mock call/read/get handlers.
    """
    app = Server("mcp-simple-pagination")

    # Paginated list_tools - returns 5 tools per page
    @app.list_tools()
    async def list_tools_paginated(request: types.ListToolsRequest) -> types.ListToolsResult:
        cursor = request.params.cursor if request.params is not None else None
        result = _paginate(SAMPLE_TOOLS, cursor, page_size=5)
        if result is None:
            # Invalid cursor: return an empty page rather than erroring.
            return types.ListToolsResult(tools=[], nextCursor=None)
        page, next_cursor = result
        return types.ListToolsResult(tools=page, nextCursor=next_cursor)

    # Paginated list_resources - returns 10 resources per page
    @app.list_resources()
    async def list_resources_paginated(
        request: types.ListResourcesRequest,
    ) -> types.ListResourcesResult:
        cursor = request.params.cursor if request.params is not None else None
        result = _paginate(SAMPLE_RESOURCES, cursor, page_size=10)
        if result is None:
            return types.ListResourcesResult(resources=[], nextCursor=None)
        page, next_cursor = result
        return types.ListResourcesResult(resources=page, nextCursor=next_cursor)

    # Paginated list_prompts - returns 7 prompts per page
    @app.list_prompts()
    async def list_prompts_paginated(
        request: types.ListPromptsRequest,
    ) -> types.ListPromptsResult:
        cursor = request.params.cursor if request.params is not None else None
        result = _paginate(SAMPLE_PROMPTS, cursor, page_size=7)
        if result is None:
            return types.ListPromptsResult(prompts=[], nextCursor=None)
        page, next_cursor = result
        return types.ListPromptsResult(prompts=page, nextCursor=next_cursor)

    # Implement call_tool handler
    @app.call_tool()
    async def call_tool(name: str, arguments: dict[str, Any]) -> list[types.ContentBlock]:
        """Mock tool invocation: echo the tool name and arguments back as text."""
        tool = next((t for t in SAMPLE_TOOLS if t.name == name), None)
        if not tool:
            raise ValueError(f"Unknown tool: {name}")
        return [
            types.TextContent(
                type="text",
                text=f"Called tool '{name}' with arguments: {arguments}",
            )
        ]

    # Implement read_resource handler
    @app.read_resource()
    async def read_resource(uri: AnyUrl) -> str:
        """Return mock content for a known resource URI."""
        resource = next((r for r in SAMPLE_RESOURCES if r.uri == uri), None)
        if not resource:
            raise ValueError(f"Unknown resource: {uri}")
        # Return a simple string - the decorator will convert it to TextResourceContents
        return f"Content of {resource.name}: This is sample content for the resource."

    # Implement get_prompt handler
    @app.get_prompt()
    async def get_prompt(name: str, arguments: dict[str, str] | None) -> types.GetPromptResult:
        """Return a mock prompt result for a known prompt name."""
        prompt = next((p for p in SAMPLE_PROMPTS if p.name == name), None)
        if not prompt:
            raise ValueError(f"Unknown prompt: {name}")
        message_text = f"This is the prompt '{name}'"
        if arguments:
            message_text += f" with arguments: {arguments}"
        return types.GetPromptResult(
            description=prompt.description,
            messages=[
                types.PromptMessage(
                    role="user",
                    content=types.TextContent(type="text", text=message_text),
                )
            ],
        )

    if transport == "sse":
        from mcp.server.sse import SseServerTransport
        from starlette.applications import Starlette
        from starlette.responses import Response
        from starlette.routing import Mount, Route

        sse = SseServerTransport("/messages/")

        async def handle_sse(request: Request):
            # request._send is private Starlette API, required by connect_sse.
            async with sse.connect_sse(request.scope, request.receive, request._send) as streams:  # type: ignore[reportPrivateUsage]
                await app.run(streams[0], streams[1], app.create_initialization_options())
            return Response()

        starlette_app = Starlette(
            debug=True,
            routes=[
                Route("/sse", endpoint=handle_sse, methods=["GET"]),
                Mount("/messages/", app=sse.handle_post_message),
            ],
        )

        import uvicorn

        uvicorn.run(starlette_app, host="127.0.0.1", port=port)
    else:
        from mcp.server.stdio import stdio_server

        async def arun():
            async with stdio_server() as streams:
                await app.run(streams[0], streams[1], app.create_initialization_options())

        anyio.run(arun)

    return 0
```
## /examples/servers/simple-pagination/pyproject.toml
```toml path="/examples/servers/simple-pagination/pyproject.toml"
[project]
name = "mcp-simple-pagination"
version = "0.1.0"
description = "A simple MCP server demonstrating pagination for tools, resources, and prompts"
readme = "README.md"
requires-python = ">=3.10"
authors = [{ name = "Anthropic, PBC." }]
maintainers = [
{ name = "David Soria Parra", email = "davidsp@anthropic.com" },
{ name = "Justin Spahr-Summers", email = "justin@anthropic.com" },
]
keywords = ["mcp", "llm", "automation", "pagination", "cursor"]
license = { text = "MIT" }
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10",
]
dependencies = ["anyio>=4.5", "click>=8.2.0", "httpx>=0.27", "mcp"]
[project.scripts]
mcp-simple-pagination = "mcp_simple_pagination.server:main"
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.hatch.build.targets.wheel]
packages = ["mcp_simple_pagination"]
[tool.pyright]
include = ["mcp_simple_pagination"]
venvPath = "."
venv = ".venv"
[tool.ruff.lint]
select = ["E", "F", "I"]
ignore = []
[tool.ruff]
line-length = 120
target-version = "py310"
[dependency-groups]
dev = ["pyright>=1.1.378", "pytest>=8.3.3", "ruff>=0.6.9"]
```
## /examples/servers/simple-prompt/.python-version
```python-version path="/examples/servers/simple-prompt/.python-version"
3.10
```
## /examples/servers/simple-prompt/README.md
# MCP Simple Prompt
A simple MCP server that exposes a customizable prompt template with optional context and topic parameters.
## Usage
Start the server using either stdio (default) or SSE transport:
```bash
# Using stdio transport (default)
uv run mcp-simple-prompt
# Using SSE transport on custom port
uv run mcp-simple-prompt --transport sse --port 8000
```
The server exposes a prompt named "simple" that accepts two optional arguments:
- `context`: Additional context to consider
- `topic`: Specific topic to focus on
## Example
Using the MCP client, you can retrieve the prompt like this using the STDIO transport:
```python
import asyncio
from mcp.client.session import ClientSession
from mcp.client.stdio import StdioServerParameters, stdio_client
async def main():
async with stdio_client(
StdioServerParameters(command="uv", args=["run", "mcp-simple-prompt"])
) as (read, write):
async with ClientSession(read, write) as session:
await session.initialize()
# List available prompts
prompts = await session.list_prompts()
print(prompts)
# Get the prompt with arguments
prompt = await session.get_prompt(
"simple",
{
"context": "User is a software developer",
"topic": "Python async programming",
},
)
print(prompt)
asyncio.run(main())
```
## /examples/servers/simple-prompt/mcp_simple_prompt/__init__.py
```py path="/examples/servers/simple-prompt/mcp_simple_prompt/__init__.py"
```
## /examples/servers/simple-prompt/mcp_simple_prompt/__main__.py
```py path="/examples/servers/simple-prompt/mcp_simple_prompt/__main__.py"
import sys

from .server import main

# Delegate to the click entry point; its return value becomes the exit status.
sys.exit(main())  # type: ignore[call-arg]
```
## /examples/servers/simple-prompt/mcp_simple_prompt/server.py
```py path="/examples/servers/simple-prompt/mcp_simple_prompt/server.py"
import anyio
import click
import mcp.types as types
from mcp.server.lowlevel import Server
from starlette.requests import Request
def create_messages(context: str | None = None, topic: str | None = None) -> list[types.PromptMessage]:
    """Create the messages for the prompt.

    An optional *context* message is emitted first, followed by the main
    request, which is tailored to *topic* when one is given.
    """

    def _user(text: str) -> types.PromptMessage:
        # All messages in this prompt are plain-text user turns.
        return types.PromptMessage(role="user", content=types.TextContent(type="text", text=text))

    messages: list[types.PromptMessage] = []

    if context:
        messages.append(_user(f"Here is some relevant context: {context}"))

    suffix = f"the following topic: {topic}" if topic else "whatever questions I may have."
    messages.append(_user("Please help me with " + suffix))

    return messages
@click.command()
@click.option("--port", default=8000, help="Port to listen on for SSE")
@click.option(
    "--transport",
    type=click.Choice(["stdio", "sse"]),
    default="stdio",
    help="Transport type",
)
def main(port: int, transport: str) -> int:
    """Run the simple-prompt server over stdio (default) or SSE.

    Exposes a single prompt named "simple" with two optional arguments.
    """
    app = Server("mcp-simple-prompt")

    @app.list_prompts()
    async def list_prompts() -> list[types.Prompt]:
        # Advertise the one available prompt and its optional arguments.
        return [
            types.Prompt(
                name="simple",
                title="Simple Assistant Prompt",
                description="A simple prompt that can take optional context and topic arguments",
                arguments=[
                    types.PromptArgument(
                        name="context",
                        description="Additional context to consider",
                        required=False,
                    ),
                    types.PromptArgument(
                        name="topic",
                        description="Specific topic to focus on",
                        required=False,
                    ),
                ],
            )
        ]

    @app.get_prompt()
    async def get_prompt(name: str, arguments: dict[str, str] | None = None) -> types.GetPromptResult:
        # Only the "simple" prompt is known; both arguments are optional.
        if name != "simple":
            raise ValueError(f"Unknown prompt: {name}")

        if arguments is None:
            arguments = {}

        return types.GetPromptResult(
            messages=create_messages(context=arguments.get("context"), topic=arguments.get("topic")),
            description="A simple prompt with optional context and topic arguments",
        )

    if transport == "sse":
        from mcp.server.sse import SseServerTransport
        from starlette.applications import Starlette
        from starlette.responses import Response
        from starlette.routing import Mount, Route

        sse = SseServerTransport("/messages/")

        async def handle_sse(request: Request):
            # request._send is private Starlette API, required by connect_sse.
            async with sse.connect_sse(request.scope, request.receive, request._send) as streams:  # type: ignore[reportPrivateUsage]
                await app.run(streams[0], streams[1], app.create_initialization_options())
            return Response()

        starlette_app = Starlette(
            debug=True,
            routes=[
                Route("/sse", endpoint=handle_sse),
                Mount("/messages/", app=sse.handle_post_message),
            ],
        )

        import uvicorn

        uvicorn.run(starlette_app, host="127.0.0.1", port=port)
    else:
        from mcp.server.stdio import stdio_server

        async def arun():
            async with stdio_server() as streams:
                await app.run(streams[0], streams[1], app.create_initialization_options())

        anyio.run(arun)

    return 0
```
## /examples/servers/simple-prompt/pyproject.toml
```toml path="/examples/servers/simple-prompt/pyproject.toml"
[project]
name = "mcp-simple-prompt"
version = "0.1.0"
description = "A simple MCP server exposing a customizable prompt"
readme = "README.md"
requires-python = ">=3.10"
authors = [{ name = "Anthropic, PBC." }]
maintainers = [
{ name = "David Soria Parra", email = "davidsp@anthropic.com" },
{ name = "Justin Spahr-Summers", email = "justin@anthropic.com" },
]
keywords = ["mcp", "llm", "automation", "prompt"]
license = { text = "MIT" }
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10",
]
dependencies = ["anyio>=4.5", "click>=8.2.0", "httpx>=0.27", "mcp"]
[project.scripts]
mcp-simple-prompt = "mcp_simple_prompt.server:main"
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.hatch.build.targets.wheel]
packages = ["mcp_simple_prompt"]
[tool.pyright]
include = ["mcp_simple_prompt"]
venvPath = "."
venv = ".venv"
[tool.ruff.lint]
select = ["E", "F", "I"]
ignore = []
[tool.ruff]
line-length = 120
target-version = "py310"
[dependency-groups]
dev = ["pyright>=1.1.378", "pytest>=8.3.3", "ruff>=0.6.9"]
```
## /examples/servers/simple-resource/.python-version
```python-version path="/examples/servers/simple-resource/.python-version"
3.10
```
## /examples/servers/simple-resource/README.md
# MCP Simple Resource
A simple MCP server that exposes sample text files as resources.
## Usage
Start the server using either stdio (default) or SSE transport:
```bash
# Using stdio transport (default)
uv run mcp-simple-resource
# Using SSE transport on custom port
uv run mcp-simple-resource --transport sse --port 8000
```
The server exposes some basic text file resources that can be read by clients.
## Example
Using the MCP client, you can retrieve resources like this using the STDIO transport:
```python
import asyncio

from mcp.types import AnyUrl
from mcp.client.session import ClientSession
from mcp.client.stdio import StdioServerParameters, stdio_client


async def main():
    # Spawn the server as a subprocess ("uv run mcp-simple-resource") and
    # talk to it over its stdin/stdout pipes.
    async with stdio_client(
        StdioServerParameters(command="uv", args=["run", "mcp-simple-resource"])
    ) as (read, write):
        async with ClientSession(read, write) as session:
            # Perform the MCP initialization handshake before any requests.
            await session.initialize()

            # List available resources
            resources = await session.list_resources()
            print(resources)

            # Get a specific resource
            resource = await session.read_resource(AnyUrl("file:///greeting.txt"))
            print(resource)


asyncio.run(main())
```
## /examples/servers/simple-resource/mcp_simple_resource/__init__.py
```py path="/examples/servers/simple-resource/mcp_simple_resource/__init__.py"
```
## /examples/servers/simple-resource/mcp_simple_resource/__main__.py
```py path="/examples/servers/simple-resource/mcp_simple_resource/__main__.py"
"""Entry point for ``python -m mcp_simple_resource``."""

import sys

from .server import main

if __name__ == "__main__":
    # Guard so importing this module never starts the server; only
    # ``python -m`` execution does. Matches the sibling example entry
    # points and propagates the CLI's return code as the exit status.
    sys.exit(main())  # type: ignore[call-arg]
```
## /examples/servers/simple-resource/mcp_simple_resource/server.py
```py path="/examples/servers/simple-resource/mcp_simple_resource/server.py"
import anyio
import click
import mcp.types as types
from mcp.server.lowlevel import Server
from mcp.server.lowlevel.helper_types import ReadResourceContents
from pydantic import AnyUrl, FileUrl
from starlette.requests import Request
# Static in-memory resources served by this example. Each entry maps a short
# resource name to its plain-text content and a human-readable title; the
# server exposes each one under a file:///<name>.txt URI.
SAMPLE_RESOURCES = dict(
    greeting=dict(
        content="Hello! This is a sample text resource.",
        title="Welcome Message",
    ),
    help=dict(
        content="This server provides a few sample text resources for testing.",
        title="Help Documentation",
    ),
    about=dict(
        content="This is the simple-resource MCP server implementation.",
        title="About This Server",
    ),
)
@click.command()
@click.option("--port", default=8000, help="Port to listen on for SSE")
@click.option(
    "--transport",
    type=click.Choice(["stdio", "sse"]),
    default="stdio",
    help="Transport type",
)
def main(port: int, transport: str) -> int:
    """Run the simple-resource MCP server.

    Args:
        port: TCP port for the SSE transport (ignored for stdio).
        transport: Either "stdio" (default) or "sse".

    Returns:
        Process exit code (always 0; failures surface as exceptions).
    """
    app = Server("mcp-simple-resource")

    @app.list_resources()
    async def list_resources() -> list[types.Resource]:
        # Advertise each sample resource as a file:///<name>.txt URI.
        return [
            types.Resource(
                uri=FileUrl(f"file:///{name}.txt"),
                name=name,
                title=SAMPLE_RESOURCES[name]["title"],
                description=f"A sample text resource named {name}",
                mimeType="text/plain",
            )
            for name in SAMPLE_RESOURCES
        ]

    @app.read_resource()
    async def read_resource(uri: AnyUrl):
        if uri.path is None:
            raise ValueError(f"Invalid resource path: {uri}")
        # Map file:///<name>.txt back to the bare resource name.
        # removesuffix only strips a trailing ".txt" — str.replace would
        # also mangle a name containing ".txt" in the middle.
        name = uri.path.lstrip("/").removesuffix(".txt")
        if name not in SAMPLE_RESOURCES:
            raise ValueError(f"Unknown resource: {uri}")
        return [ReadResourceContents(content=SAMPLE_RESOURCES[name]["content"], mime_type="text/plain")]

    if transport == "sse":
        from mcp.server.sse import SseServerTransport
        from starlette.applications import Starlette
        from starlette.responses import Response
        from starlette.routing import Mount, Route

        sse = SseServerTransport("/messages/")

        async def handle_sse(request: Request):
            # Starlette only exposes the raw ASGI send callable via a
            # private attribute; the SSE transport needs it to stream.
            async with sse.connect_sse(request.scope, request.receive, request._send) as streams:  # type: ignore[reportPrivateUsage]
                await app.run(streams[0], streams[1], app.create_initialization_options())
            return Response()

        starlette_app = Starlette(
            debug=True,
            routes=[
                Route("/sse", endpoint=handle_sse, methods=["GET"]),
                Mount("/messages/", app=sse.handle_post_message),
            ],
        )

        import uvicorn

        uvicorn.run(starlette_app, host="127.0.0.1", port=port)
    else:
        from mcp.server.stdio import stdio_server

        async def arun():
            async with stdio_server() as streams:
                await app.run(streams[0], streams[1], app.create_initialization_options())

        anyio.run(arun)

    return 0
```
## /examples/servers/simple-resource/pyproject.toml
```toml path="/examples/servers/simple-resource/pyproject.toml"
[project]
name = "mcp-simple-resource"
version = "0.1.0"
description = "A simple MCP server exposing sample text resources"
readme = "README.md"
requires-python = ">=3.10"
authors = [{ name = "Anthropic, PBC." }]
maintainers = [
{ name = "David Soria Parra", email = "davidsp@anthropic.com" },
{ name = "Justin Spahr-Summers", email = "justin@anthropic.com" },
]
keywords = ["mcp", "llm", "automation", "resource"]
license = { text = "MIT" }
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10",
]
dependencies = ["anyio>=4.5", "click>=8.2.0", "httpx>=0.27", "mcp"]
[project.scripts]
mcp-simple-resource = "mcp_simple_resource.server:main"
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.hatch.build.targets.wheel]
packages = ["mcp_simple_resource"]
[tool.pyright]
include = ["mcp_simple_resource"]
venvPath = "."
venv = ".venv"
[tool.ruff.lint]
select = ["E", "F", "I"]
ignore = []
[tool.ruff]
line-length = 120
target-version = "py310"
[dependency-groups]
dev = ["pyright>=1.1.378", "pytest>=8.3.3", "ruff>=0.6.9"]
```
## /examples/servers/simple-streamablehttp-stateless/mcp_simple_streamablehttp_stateless/__init__.py
```py path="/examples/servers/simple-streamablehttp-stateless/mcp_simple_streamablehttp_stateless/__init__.py"
```
## /examples/servers/simple-streamablehttp-stateless/mcp_simple_streamablehttp_stateless/__main__.py
```py path="/examples/servers/simple-streamablehttp-stateless/mcp_simple_streamablehttp_stateless/__main__.py"
"""Entry point for ``python -m mcp_simple_streamablehttp_stateless``."""

import sys

from .server import main

if __name__ == "__main__":
    # Click parses the CLI arguments itself; we just forward its return
    # value as the process exit status.
    sys.exit(main())  # type: ignore[call-arg]
```
## /examples/servers/simple-streamablehttp/mcp_simple_streamablehttp/__init__.py
```py path="/examples/servers/simple-streamablehttp/mcp_simple_streamablehttp/__init__.py"
```
## /examples/servers/simple-streamablehttp/mcp_simple_streamablehttp/__main__.py
```py path="/examples/servers/simple-streamablehttp/mcp_simple_streamablehttp/__main__.py"
"""Entry point for ``python -m mcp_simple_streamablehttp``."""

import sys

from .server import main

if __name__ == "__main__":
    # Propagate the CLI's return value as the process exit status,
    # matching the sibling example entry points.
    sys.exit(main())  # type: ignore[call-arg]
```
## /examples/servers/simple-tool/.python-version
```python-version path="/examples/servers/simple-tool/.python-version"
3.10
```
## /examples/servers/simple-tool/mcp_simple_tool/__init__.py
```py path="/examples/servers/simple-tool/mcp_simple_tool/__init__.py"
```
## /examples/snippets/clients/__init__.py
```py path="/examples/snippets/clients/__init__.py"
```
The content has been capped at 50000 tokens. The user could consider applying other filters to refine the result. The better and more specific the context, the better the LLM can follow instructions. If the context seems verbose, the user can refine the filter using uithub. Thank you for using https://uithub.com - Perfect LLM context for any GitHub repo.