modelcontextprotocol/servers/main (152k tokens)
```
├── .gitattributes (omitted)
├── .github/
   ├── pull_request_template.md (300 tokens)
   ├── workflows/
      ├── claude.yml (300 tokens)
      ├── python.yml (700 tokens)
      ├── release.yml (1400 tokens)
      ├── typescript.yml (600 tokens)
├── .gitignore (1100 tokens)
├── .npmrc
├── .vscode/
   ├── settings.json
├── CODE_OF_CONDUCT.md (1000 tokens)
├── CONTRIBUTING.md (400 tokens)
├── LICENSE (omitted)
├── README.md (67.5k tokens)
├── SECURITY.md (200 tokens)
├── package-lock.json (omitted)
├── package.json (200 tokens)
├── scripts/
   ├── release.py (1200 tokens)
├── src/
   ├── everything/
      ├── CLAUDE.md (200 tokens)
      ├── Dockerfile (100 tokens)
      ├── README.md (1900 tokens)
      ├── everything.ts (7.8k tokens)
      ├── index.ts (200 tokens)
      ├── instructions.md (300 tokens)
      ├── package.json (200 tokens)
      ├── sse.ts (400 tokens)
      ├── stdio.ts (100 tokens)
      ├── streamableHttp.ts (1200 tokens)
      ├── tsconfig.json
   ├── fetch/
      ├── .python-version
      ├── Dockerfile (200 tokens)
      ├── LICENSE (200 tokens)
      ├── README.md (1500 tokens)
      ├── pyproject.toml (200 tokens)
      ├── src/
         ├── mcp_server_fetch/
            ├── __init__.py (100 tokens)
            ├── __main__.py
            ├── server.py (2.1k tokens)
      ├── uv.lock (omitted)
   ├── filesystem/
      ├── Dockerfile (100 tokens)
      ├── README.md (2.2k tokens)
      ├── __tests__/
         ├── directory-tree.test.ts (1200 tokens)
         ├── lib.test.ts (5k tokens)
         ├── path-utils.test.ts (2.7k tokens)
         ├── path-validation.test.ts (8.3k tokens)
         ├── roots-utils.test.ts (600 tokens)
      ├── index.ts (5.5k tokens)
      ├── lib.ts (2.6k tokens)
      ├── package.json (200 tokens)
      ├── path-utils.ts (800 tokens)
      ├── path-validation.ts (500 tokens)
      ├── roots-utils.ts (600 tokens)
      ├── tsconfig.json
      ├── vitest.config.ts (100 tokens)
   ├── git/
      ├── .gitignore
      ├── .python-version
      ├── Dockerfile (300 tokens)
      ├── LICENSE (200 tokens)
      ├── README.md (2.2k tokens)
      ├── pyproject.toml (200 tokens)
      ├── src/
         ├── mcp_server_git/
            ├── __init__.py (100 tokens)
            ├── __main__.py
            ├── py.typed
            ├── server.py (3k tokens)
      ├── tests/
         ├── test_server.py (700 tokens)
      ├── uv.lock (omitted)
   ├── memory/
      ├── Dockerfile (100 tokens)
      ├── README.md (1900 tokens)
      ├── __tests__/
         ├── file-path.test.ts (1000 tokens)
         ├── knowledge-graph.test.ts (2.8k tokens)
      ├── index.ts (3.5k tokens)
      ├── package.json (200 tokens)
      ├── tsconfig.json
      ├── vitest.config.ts (100 tokens)
   ├── sequentialthinking/
      ├── Dockerfile (100 tokens)
      ├── README.md (1000 tokens)
      ├── __tests__/
         ├── lib.test.ts (2.5k tokens)
      ├── index.ts (1100 tokens)
      ├── lib.ts (800 tokens)
      ├── package.json (200 tokens)
      ├── tsconfig.json
      ├── vitest.config.ts (100 tokens)
   ├── time/
      ├── .python-version
      ├── Dockerfile (300 tokens)
      ├── README.md (1500 tokens)
      ├── pyproject.toml (200 tokens)
      ├── src/
         ├── mcp_server_time/
            ├── __init__.py (100 tokens)
            ├── __main__.py
            ├── server.py (1500 tokens)
      ├── test/
         ├── time_server_test.py (3.6k tokens)
      ├── uv.lock (omitted)
├── tsconfig.json (100 tokens)
```


## /.github/pull_request_template.md

## Description
<!-- Provide a brief description of your changes -->

## Server Details
<!-- If modifying an existing server, provide details -->
- Server: <!-- e.g., filesystem, github -->
- Changes to: <!-- e.g., tools, resources, prompts -->

## Motivation and Context
<!-- Why is this change needed? What problem does it solve? -->

## How Has This Been Tested?
<!-- Have you tested this with an LLM client? Which scenarios were tested? -->

## Breaking Changes
<!-- Will users need to update their MCP client configurations? -->

## Types of changes
<!-- What types of changes does your code introduce? Put an `x` in all the boxes that apply: -->
- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to change)
- [ ] Documentation update

## Checklist
<!-- Go over all the following points, and put an `x` in all the boxes that apply. -->
- [ ] I have read the [MCP Protocol Documentation](https://modelcontextprotocol.io)
- [ ] My changes follow MCP security best practices
- [ ] I have updated the server's README accordingly
- [ ] I have tested this with an LLM client
- [ ] My code follows the repository's style guidelines
- [ ] New and existing tests pass locally
- [ ] I have added appropriate error handling
- [ ] I have documented all environment variables and configuration options

## Additional context
<!-- Add any other context, implementation notes, or design decisions -->


## /.github/workflows/claude.yml

```yml path="/.github/workflows/claude.yml" 
name: Claude Code

on:
  issue_comment:
    types: [created]
  pull_request_review_comment:
    types: [created]
  issues:
    types: [opened, assigned]
  pull_request_review:
    types: [submitted]

jobs:
  claude:
    if: |
      (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
      (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
      (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
      (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: read
      issues: read
      id-token: write
      actions: read
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 1

      - name: Run Claude Code
        id: claude
        uses: anthropics/claude-code-action@beta
        with:
          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}

          # Allow Claude to read CI results on PRs
          additional_permissions: |
            actions: read

          # Trigger when assigned to an issue
          assignee_trigger: "claude"
          
          # Allow Claude to run bash
          # This should be safe given the repo is already public
          allowed_tools: "Bash"
          
          custom_instructions: |
            If posting a comment to GitHub, give a concise summary of the comment at the top and put all the details in a <details> block.

```

## /.github/workflows/python.yml

```yml path="/.github/workflows/python.yml" 
name: Python

on:
  push:
    branches:
      - main
  pull_request:
  release:
    types: [published]

jobs:
  detect-packages:
    runs-on: ubuntu-latest
    outputs:
      packages: ${{ steps.find-packages.outputs.packages }}
    steps:
      - uses: actions/checkout@v4

      - name: Find Python packages
        id: find-packages
        working-directory: src
        run: |
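          # Build a JSON array of directories containing a pyproject.toml, for use as the job matrix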
          PACKAGES=$(find . -name pyproject.toml -exec dirname {} \; | sed 's/^\.\///' | jq -R -s -c 'split("\n")[:-1]')
          echo "packages=$PACKAGES" >> $GITHUB_OUTPUT

  test:
    needs: [detect-packages]
    strategy:
      matrix:
        package: ${{ fromJson(needs.detect-packages.outputs.packages) }}
    name: Test ${{ matrix.package }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Install uv
        uses: astral-sh/setup-uv@v3

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version-file: "src/${{ matrix.package }}/.python-version"

      - name: Install dependencies
        working-directory: src/${{ matrix.package }}
        run: uv sync --frozen --all-extras --dev

      - name: Check if tests exist
        id: check-tests
        working-directory: src/${{ matrix.package }}
        run: |
          if [ -d "tests" ] || [ -d "test" ] || grep -q "pytest" pyproject.toml; then
            echo "has-tests=true" >> $GITHUB_OUTPUT
          else
            echo "has-tests=false" >> $GITHUB_OUTPUT
          fi

      - name: Run tests
        if: steps.check-tests.outputs.has-tests == 'true'
        working-directory: src/${{ matrix.package }}
        run: uv run pytest

  build:
    needs: [detect-packages, test]
    strategy:
      matrix:
        package: ${{ fromJson(needs.detect-packages.outputs.packages) }}
    name: Build ${{ matrix.package }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Install uv
        uses: astral-sh/setup-uv@v3

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version-file: "src/${{ matrix.package }}/.python-version"

      - name: Install dependencies
        working-directory: src/${{ matrix.package }}
        run: uv sync --frozen --all-extras --dev

      - name: Run pyright
        working-directory: src/${{ matrix.package }}
        run: uv run --frozen pyright

      - name: Build package
        working-directory: src/${{ matrix.package }}
        run: uv build

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          name: dist-${{ matrix.package }}
          path: src/${{ matrix.package }}/dist/

  publish:
    runs-on: ubuntu-latest
    needs: [build, detect-packages]
    if: github.event_name == 'release'

    strategy:
      matrix:
        package: ${{ fromJson(needs.detect-packages.outputs.packages) }}
    name: Publish ${{ matrix.package }}

    environment: release
    permissions:
      id-token: write # Required for trusted publishing

    steps:
      - name: Download artifacts
        uses: actions/download-artifact@v4
        with:
          name: dist-${{ matrix.package }}
          path: dist/

      - name: Publish package to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1

```

## /.github/workflows/release.yml

```yml path="/.github/workflows/release.yml" 
name: Automatic Release Creation

on:
  workflow_dispatch:
  schedule:
    - cron: '0 10 * * *'

jobs:
  create-metadata:
    runs-on: ubuntu-latest
    if: github.repository_owner == 'modelcontextprotocol'
    outputs:
      hash: ${{ steps.last-release.outputs.hash }}
      version: ${{ steps.create-version.outputs.version}}
      npm_packages: ${{ steps.create-npm-packages.outputs.npm_packages}}
      pypi_packages: ${{ steps.create-pypi-packages.outputs.pypi_packages}}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Get last release hash
        id: last-release
        run: |
          HASH=$(git rev-list --tags --max-count=1 || echo "HEAD~1")
          echo "hash=${HASH}" >> $GITHUB_OUTPUT
          echo "Using last release hash: ${HASH}"

      - name: Install uv
        uses: astral-sh/setup-uv@v5

      - name: Create version name
        id: create-version
        run: |
          VERSION=$(uv run --script scripts/release.py generate-version)
          echo "version $VERSION"
          echo "version=$VERSION" >> $GITHUB_OUTPUT

      - name: Create notes
        run: |
          HASH="${{ steps.last-release.outputs.hash }}"
          uv run --script scripts/release.py generate-notes --directory src/ $HASH > RELEASE_NOTES.md
          cat RELEASE_NOTES.md

      - name: Release notes
        uses: actions/upload-artifact@v4
        with:
          name: release-notes
          path: RELEASE_NOTES.md

      - name: Create python matrix
        id: create-pypi-packages
        run: |
          HASH="${{ steps.last-release.outputs.hash }}"
          PYPI=$(uv run --script scripts/release.py generate-matrix --pypi --directory src $HASH)
          echo "pypi_packages $PYPI"
          echo "pypi_packages=$PYPI" >> $GITHUB_OUTPUT

      - name: Create npm matrix
        id: create-npm-packages
        run: |
          HASH="${{ steps.last-release.outputs.hash }}"
          NPM=$(uv run --script scripts/release.py generate-matrix --npm --directory src $HASH)
          echo "npm_packages $NPM"
          echo "npm_packages=$NPM" >> $GITHUB_OUTPUT

  update-packages:
    needs: [create-metadata]
    if: ${{ needs.create-metadata.outputs.npm_packages != '[]' || needs.create-metadata.outputs.pypi_packages != '[]' }}
    runs-on: ubuntu-latest
    environment: release
    outputs:
      changes_made: ${{ steps.commit.outputs.changes_made }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Install uv
        uses: astral-sh/setup-uv@v5

      - name: Update packages
        run: |
          HASH="${{ needs.create-metadata.outputs.hash }}"
          uv run --script scripts/release.py update-packages --directory src/ $HASH

      - name: Configure git
        run: |
          git config --global user.name "GitHub Actions"
          git config --global user.email "actions@github.com"

      - name: Commit changes
        id: commit
        run: |
          VERSION="${{ needs.create-metadata.outputs.version }}"
          git add -u
          if git diff-index --quiet HEAD; then
            echo "changes_made=false" >> $GITHUB_OUTPUT
          else
            git commit -m 'Automatic update of packages'
            git tag -a "$VERSION" -m "Release $VERSION"
            git push origin "$VERSION"
            echo "changes_made=true" >> $GITHUB_OUTPUT
          fi

  publish-pypi:
    needs: [update-packages, create-metadata]
    if: ${{ needs.create-metadata.outputs.pypi_packages != '[]' && needs.create-metadata.outputs.pypi_packages != '' }}
    strategy:
      fail-fast: false
      matrix:
        package: ${{ fromJson(needs.create-metadata.outputs.pypi_packages) }}
    name: Build ${{ matrix.package }}
    environment: release
    permissions:
      id-token: write # Required for trusted publishing
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ needs.create-metadata.outputs.version }}

      - name: Install uv
        uses: astral-sh/setup-uv@v5

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version-file: "src/${{ matrix.package }}/.python-version"

      - name: Install dependencies
        working-directory: src/${{ matrix.package }}
        run: uv sync --frozen --all-extras --dev

      - name: Run pyright
        working-directory: src/${{ matrix.package }}
        run: uv run --frozen pyright

      - name: Build package
        working-directory: src/${{ matrix.package }}
        run: uv build

      - name: Publish package to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          packages-dir: src/${{ matrix.package }}/dist

  publish-npm:
    needs: [update-packages, create-metadata]
    if: ${{ needs.create-metadata.outputs.npm_packages != '[]' && needs.create-metadata.outputs.npm_packages != '' }}
    strategy:
      fail-fast: false
      matrix:
        package: ${{ fromJson(needs.create-metadata.outputs.npm_packages) }}
    name: Build ${{ matrix.package }}
    environment: release
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ needs.create-metadata.outputs.version }}

      - uses: actions/setup-node@v4
        with:
          node-version: 22
          cache: npm
          registry-url: 'https://registry.npmjs.org'

      - name: Install dependencies
        working-directory: src/${{ matrix.package }}
        run: npm ci

      - name: Check if version exists on npm
        working-directory: src/${{ matrix.package }}
        run: |
          VERSION=$(jq -r .version package.json)
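          # Ask the registry for this package's published versions; abort if VERSION is already published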
          if npm view --json | jq -e --arg version "$VERSION" '[.[]][0].versions | contains([$version])'; then
            echo "Version $VERSION already exists on npm"
            exit 1
          fi
          echo "Version $VERSION is new, proceeding with publish"

      - name: Build package
        working-directory: src/${{ matrix.package }}
        run: npm run build

      - name: Publish package
        working-directory: src/${{ matrix.package }}
        run: |
          npm publish --access public
        env:
          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}

  create-release:
    needs: [update-packages, create-metadata, publish-pypi, publish-npm]
    if: |
      always() &&
      needs.update-packages.outputs.changes_made == 'true' &&
      (needs.publish-pypi.result == 'success' || needs.publish-npm.result == 'success')
    runs-on: ubuntu-latest
    environment: release
    permissions:
      contents: write
    steps:
      - uses: actions/checkout@v4

      - name: Download release notes
        uses: actions/download-artifact@v4
        with:
          name: release-notes

      - name: Create release
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN}}
        run: |
          VERSION="${{ needs.create-metadata.outputs.version }}"
          gh release create "$VERSION" \
            --title "Release $VERSION" \
            --notes-file RELEASE_NOTES.md


```

## /.github/workflows/typescript.yml

```yml path="/.github/workflows/typescript.yml" 
name: TypeScript

on:
  push:
    branches:
      - main
  pull_request:
  release:
    types: [published]

jobs:
  detect-packages:
    runs-on: ubuntu-latest
    outputs:
      packages: ${{ steps.find-packages.outputs.packages }}
    steps:
      - uses: actions/checkout@v4
      - name: Find JS packages
        id: find-packages
        working-directory: src
        run: |
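          # Build a JSON array of directories containing a package.json (excluding node_modules), for the job matrix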
          PACKAGES=$(find . -name package.json -not -path "*/node_modules/*" -exec dirname {} \; | sed 's/^\.\///' | jq -R -s -c 'split("\n")[:-1]')
          echo "packages=$PACKAGES" >> $GITHUB_OUTPUT

  test:
    needs: [detect-packages]
    strategy:
      matrix:
        package: ${{ fromJson(needs.detect-packages.outputs.packages) }}
    name: Test ${{ matrix.package }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - uses: actions/setup-node@v4
        with:
          node-version: 22
          cache: npm

      - name: Install dependencies
        working-directory: src/${{ matrix.package }}
        run: npm ci

      - name: Check if tests exist
        id: check-tests
        working-directory: src/${{ matrix.package }}
        run: |
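          # Probe by actually running the test script; any failure (including a missing script) is treated as "no tests"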
          if npm run test --silent 2>/dev/null; then
            echo "has-tests=true" >> $GITHUB_OUTPUT
          else
            echo "has-tests=false" >> $GITHUB_OUTPUT
          fi
        continue-on-error: true

      - name: Run tests
        if: steps.check-tests.outputs.has-tests == 'true'
        working-directory: src/${{ matrix.package }}
        run: npm test

  build:
    needs: [detect-packages, test]
    strategy:
      matrix:
        package: ${{ fromJson(needs.detect-packages.outputs.packages) }}
    name: Build ${{ matrix.package }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - uses: actions/setup-node@v4
        with:
          node-version: 22
          cache: npm

      - name: Install dependencies
        working-directory: src/${{ matrix.package }}
        run: npm ci

      - name: Build package
        working-directory: src/${{ matrix.package }}
        run: npm run build

  publish:
    runs-on: ubuntu-latest
    needs: [build, detect-packages]
    if: github.event_name == 'release'
    environment: release

    strategy:
      matrix:
        package: ${{ fromJson(needs.detect-packages.outputs.packages) }}
    name: Publish ${{ matrix.package }}

    permissions:
      contents: read
      id-token: write

    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: 22
          cache: npm
          registry-url: "https://registry.npmjs.org"

      - name: Install dependencies
        working-directory: src/${{ matrix.package }}
        run: npm ci

      - name: Publish package
        working-directory: src/${{ matrix.package }}
        run: npm publish --access public
        env:
          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}

```

## /.gitignore

```gitignore path="/.gitignore" 
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
.pnpm-debug.log*

# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json

# Runtime data
pids
*.pid
*.seed
*.pid.lock

# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov

# Coverage directory used by tools like istanbul
coverage
*.lcov

# nyc test coverage
.nyc_output

# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt

# Bower dependency directory (https://bower.io/)
bower_components

# node-waf configuration
.lock-wscript

# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release

# Dependency directories
node_modules/
jspm_packages/

# Snowpack dependency directory (https://snowpack.dev/)
web_modules/

# TypeScript cache
*.tsbuildinfo

# Optional npm cache directory
.npm

# Optional eslint cache
.eslintcache

# Optional stylelint cache
.stylelintcache

# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/

# Optional REPL history
.node_repl_history

# Output of 'npm pack'
*.tgz

# Yarn Integrity file
.yarn-integrity

# dotenv environment variable files
.env
.env.development.local
.env.test.local
.env.production.local
.env.local

# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache

# Next.js build output
.next
out

# Nuxt.js build / generate output
.nuxt
dist

# Gatsby files
.cache/
# Comment in the public line in if your project uses Gatsby and not Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public

# vuepress build output
.vuepress/dist

# vuepress v2.x temp and cache directory
.temp
.cache

# Docusaurus cache and generated files
.docusaurus

# Serverless directories
.serverless/

# FuseBox cache
.fusebox/

# DynamoDB Local files
.dynamodb/

# TernJS port file
.tern-port

# Stores VSCode versions used for testing VSCode extensions
.vscode-test

# Jetbrains IDEs
.idea/

# yarn v2
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*

build/

gcp-oauth.keys.json
.*-server-credentials.json

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
#   For a library or package, you might want to ignore these files since the code is
#   intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
#   However, in case of collaboration, if having platform-specific dependencies or dependencies
#   having no cross-platform support, pipenv may install dependencies that don't work, or not
#   install all needed dependencies.
#Pipfile.lock

# poetry
#   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
#   This is especially recommended for binary packages to ensure reproducibility, and is more
#   commonly ignored for libraries.
#   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
#   in version control.
#   https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

.DS_Store

# PyCharm
#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can
#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
#  and can be added to the global gitignore or merged into this file.  For a more nuclear
#  option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
.claude/settings.local.json

```

## /.npmrc

```npmrc path="/.npmrc" 
registry="https://registry.npmjs.org/"
@modelcontextprotocol:registry="https://registry.npmjs.org/"

```

## /.vscode/settings.json

```json path="/.vscode/settings.json" 
{}
```

## /CODE_OF_CONDUCT.md

# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
  overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
  advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
  address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
mcp-coc@anthropic.com.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series
of actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior,  harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within
the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.


## /CONTRIBUTING.md

# Contributing to MCP Servers

Thanks for your interest in contributing! Here's how you can help make this repo better.

We accept changes through [the standard GitHub flow model](https://docs.github.com/en/get-started/using-github/github-flow).

## Server Listings

We welcome PRs that add links to your servers in the [README.md](./README.md)!

## Server Implementations

We welcome:
- **Bug fixes** — Help us squash those pesky bugs.
- **Usability improvements** — Making servers easier to use for humans and agents.
- **Enhancements that demonstrate MCP protocol features** — We encourage contributions that help reference servers better illustrate underutilized aspects of the MCP protocol beyond just Tools, such as Resources, Prompts, or Roots. For example, adding Roots support to filesystem-server helps showcase this important but lesser-known feature.

We're more selective about:
- **Other new features** — Especially if they're not crucial to the server's core purpose or are highly opinionated. The existing servers are reference servers meant to inspire the community. If you need specific features, we encourage you to build enhanced versions! We think a diverse ecosystem of servers is beneficial for everyone, and would love to link to your improved server in our README.

We don't accept:
- **New server implementations** — We encourage you to publish them yourself, and link to them from the README.

## Testing

When adding or configuring tests for servers implemented in TypeScript, use **vitest** as the test framework. Vitest provides better ESM support, faster test execution, and a more modern testing experience.
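
For illustration, a minimal vitest test might look like this (the `add` helper and `lib.js` path are hypothetical, not files in this repo):

```ts
import { describe, expect, it } from "vitest";
import { add } from "./lib.js"; // ESM import with an explicit .js extension

describe("add", () => {
  it("sums two numbers", () => {
    expect(add(2, 3)).toBe(5);
  });
});
```

The TypeScript packages in this repo keep such tests under `__tests__/` with a per-package `vitest.config.ts`.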

## Documentation

Improvements to existing documentation are welcome - although generally we'd prefer ergonomic improvements over documenting pain points where possible!

We're more selective about adding wholly new documentation, especially in ways that aren't vendor neutral (e.g. how to run a particular server with a particular client).

## Community

[Learn how the MCP community communicates](https://modelcontextprotocol.io/community/communication).

Thank you for helping make MCP servers better for everyone!

## /SECURITY.md

# Security Policy
Thank you for helping us keep our MCP servers secure.

The **reference servers** in this repo are maintained by [Anthropic](https://www.anthropic.com/) as part of the Model Context Protocol project.

The security of our systems and user data is Anthropic’s top priority. We appreciate the work of security researchers acting in good faith in identifying and reporting potential vulnerabilities.

## Vulnerability Disclosure Program

Our Vulnerability Program guidelines are defined on our [HackerOne program page](https://hackerone.com/anthropic-vdp). We ask that any validated vulnerability in this functionality be reported through the [submission form](https://hackerone.com/anthropic-vdp/reports/new?type=team&report_type=vulnerability).


## /package.json

```json path="/package.json" 
{
  "name": "@modelcontextprotocol/servers",
  "private": true,
  "version": "0.6.2",
  "description": "Model Context Protocol servers",
  "license": "MIT",
  "author": "Anthropic, PBC (https://anthropic.com)",
  "homepage": "https://modelcontextprotocol.io",
  "bugs": "https://github.com/modelcontextprotocol/servers/issues",
  "type": "module",
  "workspaces": [
    "src/*"
  ],
  "files": [],
  "scripts": {
    "build": "npm run build --workspaces",
    "watch": "npm run watch --workspaces",
    "publish-all": "npm publish --workspaces --access public",
    "link-all": "npm link --workspaces"
  },
  "dependencies": {
    "@modelcontextprotocol/server-everything": "*",
    "@modelcontextprotocol/server-memory": "*",
    "@modelcontextprotocol/server-filesystem": "*",
    "@modelcontextprotocol/server-sequential-thinking": "*"
  }
}

```

## /scripts/release.py

```py path="/scripts/release.py" 
#!/usr/bin/env uv run --script
# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "click>=8.1.8",
#     "tomlkit>=0.13.2"
# ]
# ///
import sys
import re
import click
from pathlib import Path
import json
import tomlkit
import datetime
import subprocess
from dataclasses import dataclass
from typing import Any, Iterator, NewType, Protocol


Version = NewType("Version", str)
GitHash = NewType("GitHash", str)


class GitHashParamType(click.ParamType):
    name = "git_hash"

    def convert(
        self, value: Any, param: click.Parameter | None, ctx: click.Context | None
    ) -> GitHash | None:
        if value is None:
            return None

        if not (8 <= len(value) <= 40):
            self.fail(f"Git hash must be between 8 and 40 characters, got {len(value)}")

        if not re.match(r"^[0-9a-fA-F]+{{contextString}}quot;, value):
            self.fail("Git hash must contain only hex digits (0-9, a-f)")

        try:
            # Verify hash exists in repo
            subprocess.run(
                ["git", "rev-parse", "--verify", value], check=True, capture_output=True
            )
        except subprocess.CalledProcessError:
            self.fail(f"Git hash {value} not found in repository")

        return GitHash(value.lower())


GIT_HASH = GitHashParamType()


class Package(Protocol):
    path: Path

    def package_name(self) -> str: ...

    def update_version(self, version: Version) -> None: ...


@dataclass
class NpmPackage:
    path: Path

    def package_name(self) -> str:
        with open(self.path / "package.json", "r") as f:
            return json.load(f)["name"]

    def update_version(self, version: Version):
        with open(self.path / "package.json", "r+") as f:
            data = json.load(f)
            data["version"] = version
            f.seek(0)
            json.dump(data, f, indent=2)
            f.truncate()


@dataclass
class PyPiPackage:
    path: Path

    def package_name(self) -> str:
        with open(self.path / "pyproject.toml") as f:
            toml_data = tomlkit.parse(f.read())
            name = toml_data.get("project", {}).get("name")
            if not name:
                raise Exception("No name in pyproject.toml project section")
            return str(name)

    def update_version(self, version: Version):
        # Update version in pyproject.toml
        with open(self.path / "pyproject.toml") as f:
            data = tomlkit.parse(f.read())
            data["project"]["version"] = version

        with open(self.path / "pyproject.toml", "w") as f:
            f.write(tomlkit.dumps(data))


def has_changes(path: Path, git_hash: GitHash) -> bool:
    """Check if any files changed between current state and git hash"""
    try:
        output = subprocess.run(
            ["git", "diff", "--name-only", git_hash, "--", "."],
            cwd=path,
            check=True,
            capture_output=True,
            text=True,
        )

        changed_files = [Path(f) for f in output.stdout.splitlines()]
        relevant_files = [f for f in changed_files if f.suffix in [".py", ".ts"]]
        return len(relevant_files) >= 1
    except subprocess.CalledProcessError:
        return False


def gen_version() -> Version:
    """Generate version based on current date"""
    now = datetime.datetime.now()
    return Version(f"{now.year}.{now.month}.{now.day}")


def find_changed_packages(directory: Path, git_hash: GitHash) -> Iterator[Package]:
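    """Yield each package whose .py or .ts files changed since git_hash."""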
    for path in directory.glob("*/package.json"):
        if has_changes(path.parent, git_hash):
            yield NpmPackage(path.parent)
    for path in directory.glob("*/pyproject.toml"):
        if has_changes(path.parent, git_hash):
            yield PyPiPackage(path.parent)


@click.group()
def cli():
    pass


@cli.command("update-packages")
@click.option(
    "--directory", type=click.Path(exists=True, path_type=Path), default=Path.cwd()
)
@click.argument("git_hash", type=GIT_HASH)
def update_packages(directory: Path, git_hash: GitHash) -> int:
    # Resolve the target directory and compute today's date-based version
    path = directory.resolve(strict=True)
    version = gen_version()

    for package in find_changed_packages(path, git_hash):
        name = package.package_name()
        package.update_version(version)

        click.echo(f"{name}@{version}")

    return 0


@cli.command("generate-notes")
@click.option(
    "--directory", type=click.Path(exists=True, path_type=Path), default=Path.cwd()
)
@click.argument("git_hash", type=GIT_HASH)
def generate_notes(directory: Path, git_hash: GitHash) -> int:
    # Resolve the target directory and compute today's date-based version
    path = directory.resolve(strict=True)
    version = gen_version()

    click.echo(f"# Release : v{version}")
    click.echo("")
    click.echo("## Updated packages")
    for package in find_changed_packages(path, git_hash):
        name = package.package_name()
        click.echo(f"- {name}@{version}")

    return 0


@cli.command("generate-version")
def generate_version() -> int:
    # Print today's date-based version
    click.echo(gen_version())
    return 0


@cli.command("generate-matrix")
@click.option(
    "--directory", type=click.Path(exists=True, path_type=Path), default=Path.cwd()
)
@click.option("--npm", is_flag=True, default=False)
@click.option("--pypi", is_flag=True, default=False)
@click.argument("git_hash", type=GIT_HASH)
def generate_matrix(directory: Path, git_hash: GitHash, pypi: bool, npm: bool) -> int:
    # Resolve the target directory and collect changed packages of the requested type
    path = directory.resolve(strict=True)
    version = gen_version()

    changes = []
    for package in find_changed_packages(path, git_hash):
        pkg = package.path.relative_to(path)
        if npm and isinstance(package, NpmPackage):
            changes.append(str(pkg))
        if pypi and isinstance(package, PyPiPackage):
            changes.append(str(pkg))

    click.echo(json.dumps(changes))
    return 0


if __name__ == "__main__":
    sys.exit(cli())

```

## /src/everything/CLAUDE.md

# MCP "Everything" Server - Development Guidelines

## Build, Test & Run Commands
- Build: `npm run build` - Compiles TypeScript to JavaScript
- Watch mode: `npm run watch` - Watches for changes and rebuilds automatically
- Run server: `npm run start` - Starts the MCP server using stdio transport
- Run SSE server: `npm run start:sse` - Starts the MCP server with SSE transport
- Prepare release: `npm run prepare` - Builds the project for publishing

## Code Style Guidelines
- Use ES modules with `.js` extension in import paths
- Strictly type all functions and variables with TypeScript
- Follow zod schema patterns for tool input validation
- Prefer async/await over callbacks and Promise chains
- Place all imports at top of file, grouped by external then internal
- Use descriptive variable names that clearly indicate purpose
- Implement proper cleanup for timers and resources in server shutdown
- Follow camelCase for variables/functions, PascalCase for types/classes, UPPER_CASE for constants
- Handle errors with try/catch blocks and provide clear error messages
- Use consistent indentation (2 spaces) and trailing commas in multi-line objects
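
As a compact sketch of several of these conventions together (zod input validation, naming, and error handling), consider the following hypothetical snippet; none of these identifiers exist in the server:

```ts
import { z } from "zod";

const GREETING_PREFIX = "Hello"; // UPPER_CASE constant

// zod schema pattern for validating tool input
const GreetSchema = z.object({
  name: z.string().describe("Name to greet"),
});

async function handleGreet(args: unknown): Promise<string> {
  try {
    const { name } = GreetSchema.parse(args);
    return `${GREETING_PREFIX}, ${name}!`;
  } catch (error) {
    // Clear error message per the guidelines above
    throw new Error(`greet failed: ${error instanceof Error ? error.message : String(error)}`);
  }
}
```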

## /src/everything/Dockerfile

```dockerfile path="/src/everything/Dockerfile" 
FROM node:22.12-alpine AS builder

COPY src/everything /app
COPY tsconfig.json /tsconfig.json

WORKDIR /app

RUN --mount=type=cache,target=/root/.npm npm install

FROM node:22-alpine AS release

WORKDIR /app

COPY --from=builder /app/dist /app/dist
COPY --from=builder /app/package.json /app/package.json
COPY --from=builder /app/package-lock.json /app/package-lock.json

ENV NODE_ENV=production

RUN npm ci --ignore-scripts --omit=dev

CMD ["node", "dist/index.js"]
```

## /src/everything/README.md

# Everything MCP Server

This MCP server attempts to exercise all the features of the MCP protocol. It is not intended to be a useful server, but rather a test server for builders of MCP clients. It implements prompts, tools, resources, sampling, and more to showcase MCP capabilities.

## Components

### Tools

1. `echo`
   - Simple tool to echo back input messages
   - Input:
     - `message` (string): Message to echo back
   - Returns: Text content with echoed message

2. `add`
   - Adds two numbers together
   - Inputs:
     - `a` (number): First number
     - `b` (number): Second number
   - Returns: Text result of the addition

3. `longRunningOperation`
   - Demonstrates progress notifications for long operations
   - Inputs:
     - `duration` (number, default: 10): Duration in seconds
     - `steps` (number, default: 5): Number of progress steps
   - Returns: Completion message with duration and steps
   - Sends progress notifications during execution

4. `printEnv`
   - Prints all environment variables
   - Useful for debugging MCP server configuration
   - No inputs required
   - Returns: JSON string of all environment variables

5. `sampleLLM`
   - Demonstrates LLM sampling capability using MCP sampling feature
   - Inputs:
     - `prompt` (string): The prompt to send to the LLM
     - `maxTokens` (number, default: 100): Maximum tokens to generate
   - Returns: Generated LLM response

6. `getTinyImage`
   - Returns a small test image
   - No inputs required
   - Returns: Base64 encoded PNG image data

7. `annotatedMessage`
   - Demonstrates how annotations can be used to provide metadata about content
   - Inputs:
     - `messageType` (enum: "error" | "success" | "debug"): Type of message to demonstrate different annotation patterns
     - `includeImage` (boolean, default: false): Whether to include an example image
   - Returns: Content with varying annotations:
     - Error messages: High priority (1.0), visible to both user and assistant
     - Success messages: Medium priority (0.7), user-focused
     - Debug messages: Low priority (0.3), assistant-focused
     - Optional image: Medium priority (0.5), user-focused
   - Example annotations:
     ```json
     {
       "priority": 1.0,
       "audience": ["user", "assistant"]
     }
     ```

8. `getResourceReference`
   - Returns a resource reference that can be used by MCP clients
   - Inputs:
     - `resourceId` (number, 1-100): ID of the resource to reference
   - Returns: A resource reference with:
     - Text introduction
     - Embedded resource with `type: "resource"`
     - Text instruction for using the resource URI

9. `startElicitation`
   - Initiates an elicitation (interaction) within the MCP client
   - No tool inputs; the client is asked to elicit from the user:
     - `color` (string): Favorite color
     - `number` (number, 1-100): Favorite number
     - `pets` (enum): Favorite pet
   - Returns: Confirmation of the elicitation demo with a summary of the selections

10. `structuredContent`
    - Demonstrates a tool returning structured content, using the example from the specification
    - Provides an output schema so clients can test the specification's SHOULD advisory that results be validated against the schema
    - Inputs:
      - `location` (string): A location or ZIP code; mock data is returned regardless of value
    - Returns: a response with
      - A `structuredContent` field conforming to the output schema
      - A backward-compatible text content field, per a SHOULD advisory in the specification

11. `listRoots`
    - Lists the current MCP roots provided by the client
    - Demonstrates the roots protocol capability even though this server doesn't access files
    - No inputs required
    - Returns: List of current roots with their URIs and names, or a message if no roots are set
    - Shows how servers can interact with the MCP roots protocol
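
Clients invoke any of these tools with a standard MCP `tools/call` request. For example, a call to `echo` looks like this on the wire (JSON-RPC envelope fields such as `id` omitted):

```json
{
  "method": "tools/call",
  "params": {
    "name": "echo",
    "arguments": { "message": "Hello MCP" }
  }
}
```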

### Resources

The server provides 100 test resources in two formats:
- Odd numbered resources:
  - Plaintext format
  - URI pattern: `test://static/resource/{odd_number}`
  - Content: Simple text description

- Even numbered resources:
  - Binary blob format
  - URI pattern: `test://static/resource/{even_number}`
  - Content: Base64 encoded binary data

Resource features:
- Supports pagination (10 items per page)
- Allows subscribing to resource updates
- Demonstrates resource templates
- Auto-updates subscribed resources every 10 seconds
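
A minimal client-side sketch for exercising these resources with the TypeScript SDK might look as follows (the client API shown reflects `@modelcontextprotocol/sdk` and should be treated as an assumption here):

```ts
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

const client = new Client(
  { name: "example-client", version: "1.0.0" },
  { capabilities: {} }
);
await client.connect(
  new StdioClientTransport({
    command: "npx",
    args: ["-y", "@modelcontextprotocol/server-everything"],
  })
);

// First page of resources (10 per page), then one plaintext resource
const { resources } = await client.listResources();
const result = await client.readResource({ uri: "test://static/resource/1" });
console.log(resources.length, result.contents);
```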

### Prompts

1. `simple_prompt`
   - Basic prompt without arguments
   - Returns: Single message exchange

2. `complex_prompt`
   - Advanced prompt demonstrating argument handling
   - Required arguments:
     - `temperature` (string): Temperature setting
   - Optional arguments:
     - `style` (string): Output style preference
   - Returns: Multi-turn conversation with images

3. `resource_prompt`
   - Demonstrates embedding resource references in prompts
   - Required arguments:
     - `resourceId` (number): ID of the resource to embed (1-100)
   - Returns: Multi-turn conversation with an embedded resource reference
   - Shows how to include resources directly in prompt messages

### Roots

The server demonstrates the MCP roots protocol capability:

- Declares `roots: { listChanged: true }` capability to indicate support for roots
- Handles `roots/list_changed` notifications from clients
- Requests initial roots during server initialization
- Provides a `listRoots` tool to display current roots
- Logs roots-related events for demonstration purposes

Note: This server doesn't actually access files, but demonstrates how servers can interact with the roots protocol for clients that need to understand which directories are available for file operations.
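
A server-side sketch of handling these notifications with the TypeScript SDK (API names are assumptions based on the SDK, not code from this server):

```ts
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { RootsListChangedNotificationSchema } from "@modelcontextprotocol/sdk/types.js";

const server = new Server(
  { name: "roots-demo", version: "1.0.0" },
  { capabilities: {} }
);

// When the client reports a change, ask it for the new roots list
server.setNotificationHandler(RootsListChangedNotificationSchema, async () => {
  const { roots } = await server.listRoots();
  console.error(`Client now provides ${roots.length} root(s)`);
});
```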

### Logging

The server sends a log message at a random level every 15 seconds, e.g.:

```json
{
  "method": "notifications/message",
  "params": {
	"level": "info",
	"data": "Info-level message"
  }
}
```

## Usage with Claude Desktop (uses [stdio Transport](https://modelcontextprotocol.io/specification/2025-03-26/basic/transports#stdio))

Add to your `claude_desktop_config.json`:

```json
{
  "mcpServers": {
    "everything": {
      "command": "npx",
      "args": [
        "-y",
        "@modelcontextprotocol/server-everything"
      ]
    }
  }
}
```

## Usage with VS Code

For quick installation, use one of the one-click install buttons below:

[![Install with NPX in VS Code](https://img.shields.io/badge/VS_Code-NPM-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=everything&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%40modelcontextprotocol%2Fserver-everything%22%5D%7D) [![Install with NPX in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-NPM-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=everything&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%40modelcontextprotocol%2Fserver-everything%22%5D%7D&quality=insiders)

[![Install with Docker in VS Code](https://img.shields.io/badge/VS_Code-Docker-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=everything&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22--rm%22%2C%22mcp%2Feverything%22%5D%7D) [![Install with Docker in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-Docker-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=everything&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22--rm%22%2C%22mcp%2Feverything%22%5D%7D&quality=insiders)

For manual installation, you can configure the MCP server using one of these methods:

**Method 1: User Configuration (Recommended)**
Add the configuration to your user-level MCP configuration file. Open the Command Palette (`Ctrl + Shift + P`) and run `MCP: Open User Configuration`. This will open your user `mcp.json` file where you can add the server configuration.

**Method 2: Workspace Configuration**
Alternatively, you can add the configuration to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others.

> For more details about MCP configuration in VS Code, see the [official VS Code MCP documentation](https://code.visualstudio.com/docs/copilot/mcp).

#### NPX

```json
{
  "servers": {
    "everything": {
      "command": "npx",
      "args": ["-y", "@modelcontextprotocol/server-everything"]
    }
  }
}
```

## Running from source with [HTTP+SSE Transport](https://modelcontextprotocol.io/specification/2024-11-05/basic/transports#http-with-sse) (deprecated as of [2025-03-26](https://modelcontextprotocol.io/specification/2025-03-26/basic/transports))

```shell
cd src/everything
npm install
npm run start:sse
```

## Run from source with [Streamable HTTP Transport](https://modelcontextprotocol.io/specification/2025-03-26/basic/transports#streamable-http)

```shell
cd src/everything
npm install
npm run start:streamableHttp
```

## Running as an installed package
### Install
```shell
npm install -g @modelcontextprotocol/server-everything@latest
```

### Run the default (stdio) server
```shell
npx @modelcontextprotocol/server-everything
```

### Or specify stdio explicitly
```shell
npx @modelcontextprotocol/server-everything stdio
```

### Run the SSE server
```shell
npx @modelcontextprotocol/server-everything sse
```

### Run the streamable HTTP server
```shell
npx @modelcontextprotocol/server-everything streamableHttp
```



## /src/everything/everything.ts

```ts path="/src/everything/everything.ts" 
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import type { RequestHandlerExtra } from "@modelcontextprotocol/sdk/shared/protocol.js";
import {
  CallToolRequestSchema,
  ClientCapabilities,
  CompleteRequestSchema,
  CreateMessageRequest,
  CreateMessageResultSchema,
  ElicitResultSchema,
  GetPromptRequestSchema,
  ListPromptsRequestSchema,
  ListResourcesRequestSchema,
  ListResourceTemplatesRequestSchema,
  ListToolsRequestSchema,
  LoggingLevel,
  ReadResourceRequestSchema,
  Resource,
  RootsListChangedNotificationSchema,
  ServerNotification,
  ServerRequest,
  SubscribeRequestSchema,
  Tool,
  ToolSchema,
  UnsubscribeRequestSchema,
  type Root
} from "@modelcontextprotocol/sdk/types.js";
import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
import { readFileSync } from "fs";
import { fileURLToPath } from "url";
import { dirname, join } from "path";
import JSZip from "jszip";

const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const instructions = readFileSync(join(__dirname, "instructions.md"), "utf-8");

const ToolInputSchema = ToolSchema.shape.inputSchema;
type ToolInput = z.infer<typeof ToolInputSchema>;

const ToolOutputSchema = ToolSchema.shape.outputSchema;
type ToolOutput = z.infer<typeof ToolOutputSchema>;

type SendRequest = RequestHandlerExtra<ServerRequest, ServerNotification>["sendRequest"];

/* Input schemas for tools implemented in this server */
const EchoSchema = z.object({
  message: z.string().describe("Message to echo"),
});

const AddSchema = z.object({
  a: z.number().describe("First number"),
  b: z.number().describe("Second number"),
});

const LongRunningOperationSchema = z.object({
  duration: z
    .number()
    .default(10)
    .describe("Duration of the operation in seconds"),
  steps: z
    .number()
    .default(5)
    .describe("Number of steps in the operation"),
});

const PrintEnvSchema = z.object({});

const SampleLLMSchema = z.object({
  prompt: z.string().describe("The prompt to send to the LLM"),
  maxTokens: z
    .number()
    .default(100)
    .describe("Maximum number of tokens to generate"),
});

const GetTinyImageSchema = z.object({});

const AnnotatedMessageSchema = z.object({
  messageType: z
    .enum(["error", "success", "debug"])
    .describe("Type of message to demonstrate different annotation patterns"),
  includeImage: z
    .boolean()
    .default(false)
    .describe("Whether to include an example image"),
});

const GetResourceReferenceSchema = z.object({
  resourceId: z
    .number()
    .min(1)
    .max(100)
    .describe("ID of the resource to reference (1-100)"),
});

const ElicitationSchema = z.object({});

const GetResourceLinksSchema = z.object({
  count: z
    .number()
    .min(1)
    .max(10)
    .default(3)
    .describe("Number of resource links to return (1-10)"),
});

const ListRootsSchema = z.object({});

const StructuredContentSchema = {
  input: z.object({
    location: z
      .string()
      .trim()
      .min(1)
      .describe("City name or zip code"),
  }),

  output: z.object({
    temperature: z
      .number()
      .describe("Temperature in celsius"),
    conditions: z
      .string()
      .describe("Weather conditions description"),
    humidity: z
      .number()
      .describe("Humidity percentage"),
  })
};

const ZipResourcesInputSchema = z.object({
  files: z.record(z.string().url().describe("URL of the file to include in the zip")).describe("Mapping of file names to URLs to include in the zip"),
});

enum ToolName {
  ECHO = "echo",
  ADD = "add",
  LONG_RUNNING_OPERATION = "longRunningOperation",
  PRINT_ENV = "printEnv",
  SAMPLE_LLM = "sampleLLM",
  GET_TINY_IMAGE = "getTinyImage",
  ANNOTATED_MESSAGE = "annotatedMessage",
  GET_RESOURCE_REFERENCE = "getResourceReference",
  ELICITATION = "startElicitation",
  GET_RESOURCE_LINKS = "getResourceLinks",
  STRUCTURED_CONTENT = "structuredContent",
  ZIP_RESOURCES = "zip",
  LIST_ROOTS = "listRoots"
}

enum PromptName {
  SIMPLE = "simple_prompt",
  COMPLEX = "complex_prompt",
  RESOURCE = "resource_prompt",
}

// Example completion values
const EXAMPLE_COMPLETIONS = {
  style: ["casual", "formal", "technical", "friendly"],
  temperature: ["0", "0.5", "0.7", "1.0"],
  resourceId: ["1", "2", "3", "4", "5"],
};

export const createServer = () => {
  const server = new Server(
    {
      name: "example-servers/everything",
      title: "Everything Example Server",
      version: "1.0.0",
    },
    {
      capabilities: {
        prompts: {},
        resources: { subscribe: true },
        tools: {},
        logging: {},
        completions: {}
      },
      instructions
    }
  );

  let subscriptions: Set<string> = new Set();
  let subsUpdateInterval: NodeJS.Timeout | undefined;
  let stdErrUpdateInterval: NodeJS.Timeout | undefined;

  let logsUpdateInterval: NodeJS.Timeout | undefined;
  // Store client capabilities
  let clientCapabilities: ClientCapabilities | undefined;

  // Roots state management
  let currentRoots: Root[] = [];
  let clientSupportsRoots = false;
  let sessionId: string | undefined;

  // Function to start notification intervals when a client connects
  const startNotificationIntervals = (sid?: string) => {
    sessionId = sid;
    if (!subsUpdateInterval) {
      subsUpdateInterval = setInterval(() => {
        for (const uri of subscriptions) {
          server.notification({
            method: "notifications/resources/updated",
            params: { uri },
          });
        }
      }, 10000);
    }

    const maybeAppendSessionId = sessionId ? ` - SessionId ${sessionId}` : "";
    const messages: { level: LoggingLevel; data: string }[] = [
      { level: "debug", data: `Debug-level message${maybeAppendSessionId}` },
      { level: "info", data: `Info-level message${maybeAppendSessionId}` },
      { level: "notice", data: `Notice-level message${maybeAppendSessionId}` },
      { level: "warning", data: `Warning-level message${maybeAppendSessionId}` },
      { level: "error", data: `Error-level message${maybeAppendSessionId}` },
      { level: "critical", data: `Critical-level message${maybeAppendSessionId}` },
      { level: "alert", data: `Alert-level message${maybeAppendSessionId}` },
      { level: "emergency", data: `Emergency-level message${maybeAppendSessionId}` },
    ];

    if (!logsUpdateInterval) {
      console.error("Starting logs update interval");
      logsUpdateInterval = setInterval(async () => {
        await server.sendLoggingMessage(messages[Math.floor(Math.random() * messages.length)], sessionId);
      }, 15000);
    }
  };

  // Helper method to request sampling from client
  const requestSampling = async (
    context: string,
    uri: string,
    maxTokens: number = 100,
    sendRequest: SendRequest
  ) => {
    const request: CreateMessageRequest = {
      method: "sampling/createMessage",
      params: {
        messages: [
          {
            role: "user",
            content: {
              type: "text",
              text: `Resource ${uri} context: ${context}`,
            },
          },
        ],
        systemPrompt: "You are a helpful test server.",
        maxTokens,
        temperature: 0.7,
        includeContext: "thisServer",
      },
    };

    return await sendRequest(request, CreateMessageResultSchema);

  };

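  // 100 demo resources; IDs are index + 1, so odd IDs are plaintext and
  // even IDs are base64-encoded binary blobs.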
  const ALL_RESOURCES: Resource[] = Array.from({ length: 100 }, (_, i) => {
    const uri = `test://static/resource/${i + 1}`;
    if (i % 2 === 0) {
      return {
        uri,
        name: `Resource ${i + 1}`,
        mimeType: "text/plain",
        text: `Resource ${i + 1}: This is a plaintext resource`,
      };
    } else {
      const buffer = Buffer.from(`Resource ${i + 1}: This is a base64 blob`);
      return {
        uri,
        name: `Resource ${i + 1}`,
        mimeType: "application/octet-stream",
        blob: buffer.toString("base64"),
      };
    }
  });

  const PAGE_SIZE = 10;

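  // Cursor-based pagination: the cursor is the base64-encoded index of the
  // first resource on the next page; an absent nextCursor marks the last page.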
  server.setRequestHandler(ListResourcesRequestSchema, async (request) => {
    const cursor = request.params?.cursor;
    let startIndex = 0;

    if (cursor) {
      const decodedCursor = parseInt(atob(cursor), 10);
      if (!isNaN(decodedCursor)) {
        startIndex = decodedCursor;
      }
    }

    const endIndex = Math.min(startIndex + PAGE_SIZE, ALL_RESOURCES.length);
    const resources = ALL_RESOURCES.slice(startIndex, endIndex);

    let nextCursor: string | undefined;
    if (endIndex < ALL_RESOURCES.length) {
      nextCursor = btoa(endIndex.toString());
    }

    return {
      resources,
      nextCursor,
    };
  });

  server.setRequestHandler(ListResourceTemplatesRequestSchema, async () => {
    return {
      resourceTemplates: [
        {
          uriTemplate: "test://static/resource/{id}",
          name: "Static Resource",
          description: "A static resource with a numeric ID",
        },
      ],
    };
  });

  server.setRequestHandler(ReadResourceRequestSchema, async (request) => {
    const uri = request.params.uri;

    if (uri.startsWith("test://static/resource/")) {
      const index = parseInt(uri.split("/").pop() ?? "", 10) - 1;
      if (index >= 0 && index < ALL_RESOURCES.length) {
        const resource = ALL_RESOURCES[index];
        return {
          contents: [resource],
        };
      }
    }

    throw new Error(`Unknown resource: ${uri}`);
  });

  server.setRequestHandler(SubscribeRequestSchema, async (request) => {
    const { uri } = request.params;
    subscriptions.add(uri);
    return {};
  });

  server.setRequestHandler(UnsubscribeRequestSchema, async (request) => {
    subscriptions.delete(request.params.uri);
    return {};
  });

  server.setRequestHandler(ListPromptsRequestSchema, async () => {
    return {
      prompts: [
        {
          name: PromptName.SIMPLE,
          description: "A prompt without arguments",
        },
        {
          name: PromptName.COMPLEX,
          description: "A prompt with arguments",
          arguments: [
            {
              name: "temperature",
              description: "Temperature setting",
              required: true,
            },
            {
              name: "style",
              description: "Output style",
              required: false,
            },
          ],
        },
        {
          name: PromptName.RESOURCE,
          description: "A prompt that includes an embedded resource reference",
          arguments: [
            {
              name: "resourceId",
              description: "Resource ID to include (1-100)",
              required: true,
            },
          ],
        },
      ],
    };
  });

  server.setRequestHandler(GetPromptRequestSchema, async (request) => {
    const { name, arguments: args } = request.params;

    if (name === PromptName.SIMPLE) {
      return {
        messages: [
          {
            role: "user",
            content: {
              type: "text",
              text: "This is a simple prompt without arguments.",
            },
          },
        ],
      };
    }

    if (name === PromptName.COMPLEX) {
      return {
        messages: [
          {
            role: "user",
            content: {
              type: "text",
              text: `This is a complex prompt with arguments: temperature=${args?.temperature}, style=${args?.style}`,
            },
          },
          {
            role: "assistant",
            content: {
              type: "text",
              text: "I understand. You've provided a complex prompt with temperature and style arguments. How would you like me to proceed?",
            },
          },
          {
            role: "user",
            content: {
              type: "image",
              data: MCP_TINY_IMAGE,
              mimeType: "image/png",
            },
          },
        ],
      };
    }

    if (name === PromptName.RESOURCE) {
      const resourceId = parseInt(args?.resourceId as string, 10);
      if (isNaN(resourceId) || resourceId < 1 || resourceId > 100) {
        throw new Error(
          `Invalid resourceId: ${args?.resourceId}. Must be a number between 1 and 100.`
        );
      }

      const resourceIndex = resourceId - 1;
      const resource = ALL_RESOURCES[resourceIndex];

      return {
        messages: [
          {
            role: "user",
            content: {
              type: "text",
              text: `This prompt includes Resource ${resourceId}. Please analyze the following resource:`,
            },
          },
          {
            role: "user",
            content: {
              type: "resource",
              resource: resource,
            },
          },
        ],
      };
    }

    throw new Error(`Unknown prompt: ${name}`);
  });

  server.setRequestHandler(ListToolsRequestSchema, async () => {
    const tools: Tool[] = [
      {
        name: ToolName.ECHO,
        description: "Echoes back the input",
        inputSchema: zodToJsonSchema(EchoSchema) as ToolInput,
      },
      {
        name: ToolName.ADD,
        description: "Adds two numbers",
        inputSchema: zodToJsonSchema(AddSchema) as ToolInput,
      },
      {
        name: ToolName.LONG_RUNNING_OPERATION,
        description:
          "Demonstrates a long running operation with progress updates",
        inputSchema: zodToJsonSchema(LongRunningOperationSchema) as ToolInput,
      },
      {
        name: ToolName.PRINT_ENV,
        description:
          "Prints all environment variables, helpful for debugging MCP server configuration",
        inputSchema: zodToJsonSchema(PrintEnvSchema) as ToolInput,
      },
      {
        name: ToolName.SAMPLE_LLM,
        description: "Samples from an LLM using MCP's sampling feature",
        inputSchema: zodToJsonSchema(SampleLLMSchema) as ToolInput,
      },
      {
        name: ToolName.GET_TINY_IMAGE,
        description: "Returns the MCP_TINY_IMAGE",
        inputSchema: zodToJsonSchema(GetTinyImageSchema) as ToolInput,
      },
      {
        name: ToolName.ANNOTATED_MESSAGE,
        description:
          "Demonstrates how annotations can be used to provide metadata about content",
        inputSchema: zodToJsonSchema(AnnotatedMessageSchema) as ToolInput,
      },
      {
        name: ToolName.GET_RESOURCE_REFERENCE,
        description:
          "Returns a resource reference that can be used by MCP clients",
        inputSchema: zodToJsonSchema(GetResourceReferenceSchema) as ToolInput,
      },
      {
        name: ToolName.GET_RESOURCE_LINKS,
        description:
          "Returns multiple resource links that reference different types of resources",
        inputSchema: zodToJsonSchema(GetResourceLinksSchema) as ToolInput,
      },
      {
        name: ToolName.STRUCTURED_CONTENT,
        description:
          "Returns structured content along with an output schema for client data validation",
        inputSchema: zodToJsonSchema(StructuredContentSchema.input) as ToolInput,
        outputSchema: zodToJsonSchema(StructuredContentSchema.output) as ToolOutput,
      },
      {
        name: ToolName.ZIP_RESOURCES,
        description: "Compresses the provided resource files (mapping of name to URI, which can be a data URI) to a zip file, which it returns as a data URI resource link.",
        inputSchema: zodToJsonSchema(ZipResourcesInputSchema) as ToolInput,
      }
    ];
    if (clientCapabilities?.roots) {
      tools.push({
        name: ToolName.LIST_ROOTS,
        description:
          "Lists the current MCP roots provided by the client. Demonstrates the roots protocol capability even though this server doesn't access files.",
        inputSchema: zodToJsonSchema(ListRootsSchema) as ToolInput,
      });
    }
    if (clientCapabilities?.elicitation) {
      tools.push({
        name: ToolName.ELICITATION,
        description:
          "Elicitation test tool that demonstrates how to request user input with various field types (string, boolean, email, uri, date, integer, number, enum)",
        inputSchema: zodToJsonSchema(ElicitationSchema) as ToolInput,
      });
    }

    return { tools };
  });

  server.setRequestHandler(CallToolRequestSchema, async (request, extra) => {
    const { name, arguments: args } = request.params;

    if (name === ToolName.ECHO) {
      const validatedArgs = EchoSchema.parse(args);
      return {
        content: [{ type: "text", text: `Echo: ${validatedArgs.message}` }],
      };
    }

    if (name === ToolName.ADD) {
      const validatedArgs = AddSchema.parse(args);
      const sum = validatedArgs.a + validatedArgs.b;
      return {
        content: [
          {
            type: "text",
            text: `The sum of ${validatedArgs.a} and ${validatedArgs.b} is ${sum}.`,
          },
        ],
      };
    }

    if (name === ToolName.LONG_RUNNING_OPERATION) {
      const validatedArgs = LongRunningOperationSchema.parse(args);
      const { duration, steps } = validatedArgs;
      const stepDuration = duration / steps;
      const progressToken = request.params._meta?.progressToken;

      for (let i = 1; i <= steps; i++) {
        await new Promise((resolve) =>
          setTimeout(resolve, stepDuration * 1000)
        );

        if (progressToken !== undefined) {
          await server.notification({
            method: "notifications/progress",
            params: {
              progress: i,
              total: steps,
              progressToken,
            },
          }, { relatedRequestId: extra.requestId });
        }
      }

      return {
        content: [
          {
            type: "text",
            text: `Long running operation completed. Duration: ${duration} seconds, Steps: ${steps}.`,
          },
        ],
      };
    }

    if (name === ToolName.PRINT_ENV) {
      return {
        content: [
          {
            type: "text",
            text: JSON.stringify(process.env, null, 2),
          },
        ],
      };
    }

    if (name === ToolName.SAMPLE_LLM) {
      const validatedArgs = SampleLLMSchema.parse(args);
      const { prompt, maxTokens } = validatedArgs;

      const result = await requestSampling(
        prompt,
        ToolName.SAMPLE_LLM,
        maxTokens,
        extra.sendRequest
      );
      return {
        content: [
          { type: "text", text: `LLM sampling result: ${result.content.text}` },
        ],
      };
    }

    if (name === ToolName.GET_TINY_IMAGE) {
      GetTinyImageSchema.parse(args);
      return {
        content: [
          {
            type: "text",
            text: "This is a tiny image:",
          },
          {
            type: "image",
            data: MCP_TINY_IMAGE,
            mimeType: "image/png",
          },
          {
            type: "text",
            text: "The image above is the MCP tiny image.",
          },
        ],
      };
    }

    if (name === ToolName.ANNOTATED_MESSAGE) {
      const { messageType, includeImage } = AnnotatedMessageSchema.parse(args);

      const content = [];

      // Main message with different priorities/audiences based on type
      if (messageType === "error") {
        content.push({
          type: "text",
          text: "Error: Operation failed",
          annotations: {
            priority: 1.0, // Errors are highest priority
            audience: ["user", "assistant"], // Both need to know about errors
          },
        });
      } else if (messageType === "success") {
        content.push({
          type: "text",
          text: "Operation completed successfully",
          annotations: {
            priority: 0.7, // Success messages are important but not critical
            audience: ["user"], // Success mainly for user consumption
          },
        });
      } else if (messageType === "debug") {
        content.push({
          type: "text",
          text: "Debug: Cache hit ratio 0.95, latency 150ms",
          annotations: {
            priority: 0.3, // Debug info is low priority
            audience: ["assistant"], // Technical details for assistant
          },
        });
      }

      // Optional image with its own annotations
      if (includeImage) {
        content.push({
          type: "image",
          data: MCP_TINY_IMAGE,
          mimeType: "image/png",
          annotations: {
            priority: 0.5,
            audience: ["user"], // Images primarily for user visualization
          },
        });
      }

      return { content };
    }

    if (name === ToolName.GET_RESOURCE_REFERENCE) {
      const validatedArgs = GetResourceReferenceSchema.parse(args);
      const resourceId = validatedArgs.resourceId;

      const resourceIndex = resourceId - 1;
      if (resourceIndex < 0 || resourceIndex >= ALL_RESOURCES.length) {
        throw new Error(`Resource with ID ${resourceId} does not exist`);
      }

      const resource = ALL_RESOURCES[resourceIndex];

      return {
        content: [
          {
            type: "text",
            text: `Returning resource reference for Resource ${resourceId}:`,
          },
          {
            type: "resource",
            resource: resource,
          },
          {
            type: "text",
            text: `You can access this resource using the URI: ${resource.uri}`,
          },
        ],
      };
    }

    if (name === ToolName.ELICITATION) {
      ElicitationSchema.parse(args);

      const elicitationResult = await extra.sendRequest({
        method: 'elicitation/create',
        params: {
          message: 'Please provide inputs for the following fields:',
          requestedSchema: {
            type: 'object',
            properties: {
              name: {
                title: 'Full Name',
                type: 'string',
                description: 'Your full, legal name',
              },
              check: {
                title: 'Agree to terms',
                type: 'boolean',
                description: 'A boolean check',
              },
              color: {
                title: 'Favorite Color',
                type: 'string',
                description: 'Favorite color (open text)',
                default: 'blue',
              },
              email: {
                title: 'Email Address',
                type: 'string',
                format: 'email',
                description: 'Your email address (will be verified, and never shared with anyone else)',
              },
              homepage: {
                type: 'string',
                format: 'uri',
                description: 'Homepage / personal site',
              },
              birthdate: {
                title: 'Birthdate',
                type: 'string',
                format: 'date',
                description: 'Your date of birth (will never be shared with anyone else)',
              },
              integer: {
                title: 'Favorite Integer',
                type: 'integer',
                description: 'Your favorite integer (do not give us your phone number, pin, or other sensitive info)',
                minimum: 1,
                maximum: 100,
                default: 42,
              },
              number: {
                title: 'Favorite Number',
                type: 'number',
                description: 'Favorite number (there are no wrong answers)',
                minimum: 0,
                maximum: 1000,
                default: 3.14,
              },
              petType: {
                title: 'Pet type',
                type: 'string',
                enum: ['cats', 'dogs', 'birds', 'fish', 'reptiles'],
                enumNames: ['Cats', 'Dogs', 'Birds', 'Fish', 'Reptiles'],
                default: 'dogs',
                description: 'Your favorite pet type',
              },
            },
            required: ['name'],
          },
        },
      }, ElicitResultSchema, { timeout: 10 * 60 * 1000 /* 10 minutes */ });

      // Handle different response actions
      const content = [];

      if (elicitationResult.action === 'accept' && elicitationResult.content) {
        content.push({
          type: "text",
          text: `✅ User provided the requested information!`,
        });

        // Only access elicitationResult.content when action is accept
        const userData = elicitationResult.content;
        const lines = [];
        if (userData.name) lines.push(`- Name: ${userData.name}`);
        if (userData.check !== undefined) lines.push(`- Agreed to terms: ${userData.check}`);
        if (userData.color) lines.push(`- Favorite Color: ${userData.color}`);
        if (userData.email) lines.push(`- Email: ${userData.email}`);
        if (userData.homepage) lines.push(`- Homepage: ${userData.homepage}`);
        if (userData.birthdate) lines.push(`- Birthdate: ${userData.birthdate}`);
        if (userData.integer !== undefined) lines.push(`- Favorite Integer: ${userData.integer}`);
        if (userData.number !== undefined) lines.push(`- Favorite Number: ${userData.number}`);
        if (userData.petType) lines.push(`- Pet Type: ${userData.petType}`);

        content.push({
          type: "text",
          text: `User inputs:\n${lines.join('\n')}`,
        });
      } else if (elicitationResult.action === 'decline') {
        content.push({
          type: "text",
          text: `❌ User declined to provide the requested information.`,
        });
      } else if (elicitationResult.action === 'cancel') {
        content.push({
          type: "text",
          text: `⚠️ User cancelled the elicitation dialog.`,
        });
      }

      // Include raw result for debugging
      content.push({
        type: "text",
        text: `\nRaw result: ${JSON.stringify(elicitationResult, null, 2)}`,
      });

      return { content };
    }

    if (name === ToolName.GET_RESOURCE_LINKS) {
      const { count } = GetResourceLinksSchema.parse(args);
      const content = [];

      // Add intro text
      content.push({
        type: "text",
        text: `Here are ${count} resource links to resources available in this server (see full output in tool response if your client does not support resource_link yet):`,
      });

      // Return resource links to actual resources from ALL_RESOURCES
      const actualCount = Math.min(count, ALL_RESOURCES.length);
      for (let i = 0; i < actualCount; i++) {
        const resource = ALL_RESOURCES[i];
        content.push({
          type: "resource_link",
          uri: resource.uri,
          name: resource.name,
          description: `Resource ${i + 1}: ${resource.mimeType === "text/plain"
            ? "plaintext resource"
            : "binary blob resource"
            }`,
          mimeType: resource.mimeType,
        });
      }

      return { content };
    }

    if (name === ToolName.STRUCTURED_CONTENT) {
      // The same response is returned for every input.
      const validatedArgs = StructuredContentSchema.input.parse(args);

      const weather = {
        temperature: 22.5,
        conditions: "Partly cloudy",
        humidity: 65,
      };

      const backwardCompatibleContent = {
        type: "text",
        text: JSON.stringify(weather),
      };

      return {
        content: [backwardCompatibleContent],
        structuredContent: weather,
      };
    }

    if (name === ToolName.ZIP_RESOURCES) {
      const { files } = ZipResourcesInputSchema.parse(args);

      const zip = new JSZip();

      for (const [fileName, fileUrl] of Object.entries(files)) {
        try {
          const response = await fetch(fileUrl);
          if (!response.ok) {
            throw new Error(`Failed to fetch ${fileUrl}: ${response.statusText}`);
          }
          const arrayBuffer = await response.arrayBuffer();
          zip.file(fileName, arrayBuffer);
        } catch (error) {
          throw new Error(`Error fetching file ${fileUrl}: ${error instanceof Error ? error.message : String(error)}`);
        }
      }

      const uri = `data:application/zip;base64,${await zip.generateAsync({ type: "base64" })}`;

      return {
        content: [
          {
            type: "resource_link",
            mimeType: "application/zip",
            uri,
          },
        ],
      };
    }

    if (name === ToolName.LIST_ROOTS) {
      ListRootsSchema.parse(args);

      if (!clientSupportsRoots) {
        return {
          content: [
            {
              type: "text",
              text: "The MCP client does not support the roots protocol.\n\n" +
                "This means the server cannot access information about the client's workspace directories or file system roots."
            }
          ]
        };
      }

      if (currentRoots.length === 0) {
        return {
          content: [
            {
              type: "text",
              text: "The client supports roots but no roots are currently configured.\n\n" +
                "This could mean:\n" +
                "1. The client hasn't provided any roots yet\n" +
                "2. The client provided an empty roots list\n" +
                "3. The roots configuration is still being loaded"
            }
          ]
        };
      }

      const rootsList = currentRoots.map((root, index) => {
        return `${index + 1}. ${root.name || 'Unnamed Root'}\n   URI: ${root.uri}`;
      }).join('\n\n');

      return {
        content: [
          {
            type: "text",
            text: `Current MCP Roots (${currentRoots.length} total):\n\n${rootsList}\n\n` +
              "Note: This server demonstrates the roots protocol capability but doesn't actually access files. " +
              "The roots are provided by the MCP client and can be used by servers that need file system access."
          }
        ]
      };
    }

    throw new Error(`Unknown tool: ${name}`);
  });

  server.setRequestHandler(CompleteRequestSchema, async (request) => {
    const { ref, argument } = request.params;

    if (ref.type === "ref/resource") {
      const resourceId = ref.uri.split("/").pop();
      if (!resourceId) return { completion: { values: [] } };

      // Filter resource IDs that start with the input value
      const values = EXAMPLE_COMPLETIONS.resourceId.filter((id) =>
        id.startsWith(argument.value)
      );
      return { completion: { values, hasMore: false, total: values.length } };
    }

    if (ref.type === "ref/prompt") {
      // Handle completion for prompt arguments
      const completions =
        EXAMPLE_COMPLETIONS[argument.name as keyof typeof EXAMPLE_COMPLETIONS];
      if (!completions) return { completion: { values: [] } };

      const values = completions.filter((value) =>
        value.startsWith(argument.value)
      );
      return { completion: { values, hasMore: false, total: values.length } };
    }

    throw new Error(`Unknown reference type`);
  });

  // Roots protocol handlers
  server.setNotificationHandler(RootsListChangedNotificationSchema, async () => {
    try {
      // Request the updated roots list from the client
      const response = await server.listRoots();
      if (response && 'roots' in response) {
        currentRoots = response.roots;

        // Log the roots update for demonstration
        await server.sendLoggingMessage({
            level: "info",
            logger: "everything-server",
            data: `Roots updated: ${currentRoots.length} root(s) received from client`,
        }, sessionId);
      }
    } catch (error) {
      await server.sendLoggingMessage({
          level: "error",
          logger: "everything-server",
          data: `Failed to request roots from client: ${error instanceof Error ? error.message : String(error)}`,
      }, sessionId);
    }
  });

  // Handle post-initialization setup for roots
  server.oninitialized = async () => {
    clientCapabilities = server.getClientCapabilities();

    if (clientCapabilities?.roots) {
      clientSupportsRoots = true;
      try {
        const response = await server.listRoots();
        if (response && 'roots' in response) {
          currentRoots = response.roots;

          await server.sendLoggingMessage({
              level: "info",
              logger: "everything-server",
              data: `Initial roots received: ${currentRoots.length} root(s) from client`,
          }, sessionId);
        } else {
          await server.sendLoggingMessage({
              level: "warning",
              logger: "everything-server",
              data: "Client returned no roots set",
          }, sessionId);
        }
      } catch (error) {
        await server.sendLoggingMessage({
            level: "error",
            logger: "everything-server",
            data: `Failed to request initial roots from client: ${error instanceof Error ? error.message : String(error)}`,
        }, sessionId);
      }
    } else {
      await server.sendLoggingMessage({
          level: "info",
          logger: "everything-server",
          data: "Client does not support MCP roots protocol",
      }, sessionId);
    }
  };

  const cleanup = async () => {
    if (subsUpdateInterval) clearInterval(subsUpdateInterval);
    if (logsUpdateInterval) clearInterval(logsUpdateInterval);
    if (stdErrUpdateInterval) clearInterval(stdErrUpdateInterval);
  };

  return { server, cleanup, startNotificationIntervals };
};

const MCP_TINY_IMAGE =
  "iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAKsGlDQ1BJQ0MgUHJvZmlsZQAASImVlwdUU+kSgOfe9JDQEiIgJfQmSCeAlBBaAAXpYCMkAUKJMRBU7MriClZURLCs6KqIgo0idizYFsWC3QVZBNR1sWDDlXeBQ9jdd9575805c+a7c+efmf+e/z9nLgCdKZDJMlF1gCxpjjwyyI8dn5DIJvUABRiY0kBdIMyWcSMiwgCTUft3+dgGyJC9YzuU69/f/1fREImzhQBIBMbJomxhFsbHMe0TyuQ5ALg9mN9kbo5siK9gzJRjDWL8ZIhTR7hviJOHGY8fjomO5GGsDUCmCQTyVACaKeZn5wpTsTw0f4ztpSKJFGPsGbyzsmaLMMbqgiUWI8N4KD8n+S95Uv+WM1mZUyBIVfLIXoaF7C/JlmUK5v+fn+N/S1amYrSGOaa0NHlwJGaxvpAHGbNDlSxNnhI+yhLRcPwwpymCY0ZZmM1LHGWRwD9UuTZzStgop0gC+co8OfzoURZnB0SNsnx2pLJWipzHHWWBfKyuIiNG6U8T85X589Ki40Y5VxI7ZZSzM6JCx2J4Sr9cEansXywN8hurG6jce1b2X/Yr4SvX5qRFByv3LhjrXyzljuXMjlf2JhL7B4zFxCjjZTl+ylqyzAhlvDgzSOnPzo1Srs3BDuTY2gjlN0wXhESMMoRBELAhBjIhB+QggECQgBTEOeJ5Q2cUeLNl8+WS1LQcNhe7ZWI2Xyq0m8B2tHd0Bhi6syNH4j1r+C4irGtjvhWVAF4nBgcHT475Qm4BHEkCoNaO+SxnAKh3A1w5JVTIc0d8Q9cJCEAFNWCCDhiACViCLTiCK3iCLwRACIRDNCTATBBCGmRhnc+FhbAMCqAI1sNmKIOdsBv2wyE4CvVwCs7DZbgOt+AePIZ26IJX0AcfYQBBEBJCRxiIDmKImCE2iCPCQbyRACQMiUQSkCQkFZEiCmQhsgIpQoqRMmQXUokcQU4g55GrSCvyEOlAepF3yFcUh9JQJqqPmqMTUQ7KRUPRaHQGmorOQfPQfHQtWopWoAfROvQ8eh29h7ajr9B+HOBUcCycEc4Wx8HxcOG4RFwKTo5bjCvEleAqcNW4Rlwz7g6uHfca9wVPxDPwbLwt3hMfjI/BC/Fz8Ivxq/Fl+P34OvxF/B18B74P/51AJ+gRbAgeBD4hnpBKmEsoIJQQ9hJqCZcI9whdhI9EIpFFtCC6EYOJCcR04gLiauJ2Yg3xHLGV2EnsJ5FIOiQbkhcpnCQg5ZAKSFtJB0lnSbdJXaTPZBWyIdmRHEhOJEvJy8kl5APkM+Tb5G7yAEWdYkbxoIRTRJT5lHWUPZRGyk1KF2WAqkG1oHpRo6np1GXUUmo19RL1CfW9ioqKsYq7ylQVicpSlVKVwypXVDpUvtA0adY0Hm06TUFbS9tHO0d7SHtPp9PN6b70RHoOfS29kn6B/oz+WZWhaqfKVxWpLlEtV61Tva36Ro2iZqbGVZuplqdWonZM7abaa3WKurk6T12gvli9XP2E+n31fg2GhoNGuEaWxmqNAxpXNXo0SZrmmgGaIs18zd2aFzQ7GTiGCYPHEDJWMPYwLjG6mESmBZPPTGcWMQ8xW5h9WppazlqxWvO0yrVOa7WzcCxzFp+VyVrHOspqY30dpz+OO048btW46nG3x33SHq/tqy3WLtSu0b6n/VWHrROgk6GzQade56kuXtdad6ruXN0dupd0X49njvccLxxfOP7o+Ed6qJ61XqTeAr3dejf0+vUN9IP0Zfpb9S/ovzZgGfgapBtsMjhj0GvIMPQ2lBhuMjxr+JKtxeayM9ml7IvsPiM9o2AjhdEuoxajAWML4xjj5cY1xk9NqCYckxSTTSZNJn2mhqaTTReaVpk+MqOYcczSzLaYNZt9MrcwjzNfaV5v3mOhbcG3yLOosnhiSbf0sZxjWWF514poxbHKsNpudcsatXaxTrMut75pg9q42khsttu0TiBMcJ8gnVAx4b4tzZZrm2tbZdthx7ILs1tuV2/3ZqLpxMSJGyY2T/xu72Kfab/H/rGDpkOIw3KHRod3jtaOQsdyx7tOdKdApyVODU5vnW2cxc47nB+4MFwmu6x0aXL509XNVe5a7drrZuqW5LbN7T6HyYngrOZccSe4+7kvcT/l/sXD1SPH46jHH562nhmeBzx7JllMEk/aM6nTy9hL4LXLq92b7Z3k/ZN3u4+Rj8Cnwue5r4mvyHevbzfXipvOPch942fvJ/er9fvE8+At4p3zx/kH+Rf6twRoBsQElAU8CzQOTA2sCuwLcglaEHQumBAcGrwh+D5fny/kV/L7QtxCFoVcDKWFRoWWhT4Psw6ThzVORieHTN44+ckUsynSKfXhEM4P3xj+NMIiYk7EyanEqRFTy6e+iHSIXBjZHMWImhV1IOpjtF/0uujHMZYxipimWLXY6bGVsZ/i/OOK49rjJ8Yvir+eoJsgSWhIJCXGJu5N7J8WMG3ztK7pLtMLprfNsJgxb8bVmbozM2eenqU2SzDrWBIhKS7pQNI3QbigQtCfzE/eltwn5Am3CF+JfEWbRL1iL3GxuDvFK6U4pSfVK3Vjam+aT1pJ2msJT1ImeZsenL4z/VNGeMa+jMHMuMyaLHJWUtYJqaY0Q3pxtsHsebNbZTayAln7HI85m+f0yUPle7OR7BnZDTlMbDi6obBU/KDoyPXOLc/9PDd27rF5GvOk827Mt56/an53XmDezwvwC4QLmhYaLVy2sGMRd9Guxcji5MVNS0yW5C/pWhq0dP8y6rKMZb8st19evPzDirgVjfn6+UvzO38I+qGqQLVAXnB/pefKnT/if5T82LLKadXWVd8LRYXXiuyLSoq+rRauvrbGYU3pmsG1KWtb1rmu27GeuF66vm2Dz4b9xRrFecWdGydvrNvE3lS46cPmWZuvljiX7NxC3aLY0l4aVtqw1XTr+q3fytLK7pX7ldds09u2atun7aLtt3f47qjeqb+zaOfXnyQ/PdgVtKuuwryiZDdxd+7uF3ti9zT/zPm5cq/u3qK9f+6T7mvfH7n/YqVbZeUBvQPrqtAqRVXvwekHbx3yP9RQbVu9q4ZVU3QYDisOvzySdKTtaOjRpmOcY9XHzY5vq2XUFtYhdfPr+urT6tsbEhpaT4ScaGr0bKw9aXdy3ymjU+WntU6vO0M9k39m8Gze2f5zsnOvz6ee72ya1fT4QvyFuxenXmy5FHrpyuXAyxeauc1nr3hdOXXV4+qJa5xr9dddr9fdcLlR+4vLL7Utri11N91uNtzyv9XYOqn1zG2f2+fv+N+5fJd/9/q9Kfda22LaHtyffr/9gehBz8PMh28f5T4aeLz0CeFJ4VP1pyXP9J5V/Gr1a027a/vpDv+OG8+jnj/uFHa++i37t29d+S/oL0q6Dbsrexx7TvUG9t56Oe1l1yvZq4HXBb9r/L7tjeWb43/4/nGjL76v66387eC71e913u/74PyhqT+i/9nHrI8Dnwo/63ze/4Xzpflr3NfugbnfSN9K/7T6s/F76Pcn
g1mDgzKBXDA8CuAwRVNSAN7tA6AnADCwGYI6bWSmHhZk5D9gmOA/8cjcPSyuANWYGRqNeOcADmNqvhRAzRdgaCyK9gXUyUmpo/Pv8Kw+JAbYv8K0HECi2x6tebQU/iEjc/xf+v6nBWXWv9l/AV0EC6JTIblRAAAAeGVYSWZNTQAqAAAACAAFARIAAwAAAAEAAQAAARoABQAAAAEAAABKARsABQAAAAEAAABSASgAAwAAAAEAAgAAh2kABAAAAAEAAABaAAAAAAAAAJAAAAABAAAAkAAAAAEAAqACAAQAAAABAAAAFKADAAQAAAABAAAAFAAAAAAXNii1AAAACXBIWXMAABYlAAAWJQFJUiTwAAAB82lUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNi4wLjAiPgogICA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPgogICAgICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgICAgICAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyI+CiAgICAgICAgIDx0aWZmOllSZXNvbHV0aW9uPjE0NDwvdGlmZjpZUmVzb2x1dGlvbj4KICAgICAgICAgPHRpZmY6T3JpZW50YXRpb24+MTwvdGlmZjpPcmllbnRhdGlvbj4KICAgICAgICAgPHRpZmY6WFJlc29sdXRpb24+MTQ0PC90aWZmOlhSZXNvbHV0aW9uPgogICAgICAgICA8dGlmZjpSZXNvbHV0aW9uVW5pdD4yPC90aWZmOlJlc29sdXRpb25Vbml0PgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KReh49gAAAjRJREFUOBGFlD2vMUEUx2clvoNCcW8hCqFAo1dKhEQpvsF9KrWEBh/ALbQ0KkInBI3SWyGPCCJEQliXgsTLefaca/bBWjvJzs6cOf/fnDkzOQJIjWm06/XKBEGgD8c6nU5VIWgBtQDPZPWtJE8O63a7LBgMMo/Hw0ql0jPjcY4RvmqXy4XMjUYDUwLtdhtmsxnYbDbI5/O0djqdFFKmsEiGZ9jP9gem0yn0ej2Yz+fg9XpfycimAD7DttstQTDKfr8Po9GIIg6Hw1Cr1RTgB+A72GAwgMPhQLBMJgNSXsFqtUI2myUo18pA6QJogefsPrLBX4QdCVatViklw+EQRFGEj88P2O12pEUGATmsXq+TaLPZ0AXgMRF2vMEqlQoJTSYTpNNpApvNZliv1/+BHDaZTAi2Wq1A3Ig0xmMej7+RcZjdbodUKkWAaDQK+GHjHPnImB88JrZIJAKFQgH2+z2BOczhcMiwRCIBgUAA+NN5BP6mj2DYff35gk6nA61WCzBn2JxO5wPM7/fLz4vD0E+OECfn8xl/0Gw2KbLxeAyLxQIsFgt8p75pDSO7h/HbpUWpewCike9WLpfB7XaDy+WCYrFI/slk8i0MnRRAUt46hPMI4vE4+Hw+ec7t9/44VgWigEeby+UgFArJWjUYOqhWG6x50rpcSfR6PVUfNOgEVRlTX0HhrZBKz4MZjUYWi8VoA+lc9H/VaRZYjBKrtXR8tlwumcFgeMWRbZpA9ORQWfVm8A/FsrLaxebd5wAAAABJRU5ErkJggg==";

```

## /src/everything/index.ts

```ts path="/src/everything/index.ts" 
#!/usr/bin/env node

// Parse command line arguments first
const args = process.argv.slice(2);
const scriptName = args[0] || 'stdio';

async function run() {
    try {
        // Dynamically import only the requested module to prevent all modules from initializing
        switch (scriptName) {
            case 'stdio':
                // Import and run the default server
                await import('./stdio.js');
                break;
            case 'sse':
                // Import and run the SSE server
                await import('./sse.js');
                break;
            case 'streamableHttp':
                // Import and run the streamable HTTP server
                await import('./streamableHttp.js');
                break;
            default:
                console.error(`Unknown script: ${scriptName}`);
                console.log('Available scripts:');
                console.log('- stdio');
                console.log('- sse');
                console.log('- streamableHttp');
                process.exit(1);
        }
    } catch (error) {
        console.error('Error running script:', error);
        process.exit(1);
    }
}

run();

```

## /src/everything/instructions.md

Testing and demonstration server for MCP protocol features.

## Resources

Resources 1-100 follow a pattern: odd IDs contain plaintext, even IDs contain binary data. Resources are paginated at 10 items per page with cursor-based navigation.
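
For example, a follow-up page can be requested by echoing back the opaque cursor from the previous result (a sketch; here `"MTA="` is the base64 encoding of the index `10`):

```json
{
  "method": "resources/list",
  "params": { "cursor": "MTA=" }
}
```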

## Key dependencies

Progress notifications require `_meta.progressToken` in tool calls. Resource subscriptions generate updates every 10 seconds.
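
A minimal sketch of a tool call that opts into progress updates (the token value is arbitrary and chosen by the client):

```json
{
  "method": "tools/call",
  "params": {
    "name": "longRunningOperation",
    "arguments": { "duration": 10, "steps": 5 },
    "_meta": { "progressToken": "token-1" }
  }
}
```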

## Performance characteristics

Server generates automatic log messages every 15 seconds (filtered by the current log level) and resource update notifications every 10 seconds for active subscriptions. Log level changes affect message filtering in real time.

## Multi-modal testing

`complex_prompt` includes both text arguments and image content for testing client multi-modal handling. `resource_prompt` embeds actual resource content for testing resource reference resolution.

Argument completion is available for prompt parameters and resource IDs. Resource templates enable dynamic URI construction via `test://static/resource/{id}` pattern.

## Easter egg

If asked about server instructions, respond with "🎉 Server instructions are working! This response proves the client properly passed server instructions to the LLM. This demonstrates MCP's instructions feature in action."


## /src/everything/package.json

```json path="/src/everything/package.json" 
{
  "name": "@modelcontextprotocol/server-everything",
  "version": "0.6.2",
  "description": "MCP server that exercises all the features of the MCP protocol",
  "license": "MIT",
  "author": "Anthropic, PBC (https://anthropic.com)",
  "homepage": "https://modelcontextprotocol.io",
  "bugs": "https://github.com/modelcontextprotocol/servers/issues",
  "type": "module",
  "bin": {
    "mcp-server-everything": "dist/index.js"
  },
  "files": [
    "dist"
  ],
  "scripts": {
    "build": "tsc && shx cp instructions.md dist/ && shx chmod +x dist/*.js",
    "prepare": "npm run build",
    "watch": "tsc --watch",
    "start": "node dist/index.js",
    "start:sse": "node dist/sse.js",
    "start:streamableHttp": "node dist/streamableHttp.js"
  },
  "dependencies": {
    "@modelcontextprotocol/sdk": "^1.19.1",
    "cors": "^2.8.5",
    "express": "^4.21.1",
    "jszip": "^3.10.1",
    "zod": "^3.23.8",
    "zod-to-json-schema": "^3.23.5"
  },
  "devDependencies": {
    "@types/cors": "^2.8.19",
    "@types/express": "^5.0.0",
    "shx": "^0.3.4",
    "typescript": "^5.6.2"
  }
}

```

## /src/everything/sse.ts

```ts path="/src/everything/sse.ts" 
import { SSEServerTransport } from "@modelcontextprotocol/sdk/server/sse.js";
import express from "express";
import { createServer } from "./everything.js";
import cors from 'cors';

console.error('Starting SSE server...');

const app = express();
app.use(cors({
    "origin": "*", // use "*" with caution in production
    "methods": "GET,POST",
    "preflightContinue": false,
    "optionsSuccessStatus": 204,
})); // Enable CORS for all routes so Inspector can connect
const transports: Map<string, SSEServerTransport> = new Map<string, SSEServerTransport>();

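// The SSE transport pairs two endpoints: GET /sse opens the server-to-client
// event stream, while POST /message carries client-to-server JSON-RPC
// messages, correlated via the sessionId query parameter.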
app.get("/sse", async (req, res) => {
  let transport: SSEServerTransport;
  const { server, cleanup, startNotificationIntervals } = createServer();

  if (req?.query?.sessionId) {
    const sessionId = req.query.sessionId as string;
    transport = transports.get(sessionId) as SSEServerTransport;
    // Optional chaining guards against an unknown sessionId so the log call cannot throw
    console.error("Client Reconnecting? This shouldn't happen; when a client has a sessionId, GET /sse should not be called again.", transport?.sessionId);
  } else {
    // Create and store transport for new session
    transport = new SSEServerTransport("/message", res);
    transports.set(transport.sessionId, transport);

    // Connect server to transport
    await server.connect(transport);
    console.error("Client Connected: ", transport.sessionId);

    // Start notification intervals after client connects
    startNotificationIntervals(transport.sessionId);

    // Handle close of connection
    server.onclose = async () => {
      console.error("Client Disconnected: ", transport.sessionId);
      transports.delete(transport.sessionId);
      await cleanup();
    };

  }

});

app.post("/message", async (req, res) => {
  const sessionId = (req?.query?.sessionId as string);
  const transport = transports.get(sessionId);
  if (transport) {
    console.error("Client Message from", sessionId);
    await transport.handlePostMessage(req, res);
  } else {
    console.error(`No transport found for sessionId ${sessionId}`)
  }
});

const PORT = process.env.PORT || 3001;
app.listen(PORT, () => {
  console.error(`Server is running on port ${PORT}`);
});

```

## /src/everything/stdio.ts

```ts path="/src/everything/stdio.ts" 
#!/usr/bin/env node

import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { createServer } from "./everything.js";

console.error('Starting default (STDIO) server...');

async function main() {
    const transport = new StdioServerTransport();
    const {server, cleanup, startNotificationIntervals} = createServer();

    await server.connect(transport);
    startNotificationIntervals();

    // Cleanup on exit
    process.on("SIGINT", async () => {
        await cleanup();
        await server.close();
        process.exit(0);
    });
}

main().catch((error) => {
  console.error("Server error:", error);
  process.exit(1);
});


```

## /src/everything/streamableHttp.ts

```ts path="/src/everything/streamableHttp.ts" 
import { StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js";
import { InMemoryEventStore } from '@modelcontextprotocol/sdk/examples/shared/inMemoryEventStore.js';
import express, { Request, Response } from "express";
import { createServer } from "./everything.js";
import { randomUUID } from 'node:crypto';
import cors from 'cors';

console.error('Starting Streamable HTTP server...');

const app = express();
app.use(cors({
    "origin": "*", // use "*" with caution in production
    "methods": "GET,POST,DELETE",
    "preflightContinue": false,
    "optionsSuccessStatus": 204,
    "exposedHeaders": [
        'mcp-session-id',
        'last-event-id',
        'mcp-protocol-version'
    ]
})); // Enable CORS for all routes so Inspector can connect

const transports: Map<string, StreamableHTTPServerTransport> = new Map<string, StreamableHTTPServerTransport>();

app.post('/mcp', async (req: Request, res: Response) => {
  console.error('Received MCP POST request');
  try {
    // Check for existing session ID
    const sessionId = req.headers['mcp-session-id'] as string | undefined;

    let transport: StreamableHTTPServerTransport;

    if (sessionId && transports.has(sessionId)) {
      // Reuse existing transport
      transport = transports.get(sessionId)!;
    } else if (!sessionId) {

      const { server, cleanup, startNotificationIntervals } = createServer();

      // New initialization request
      const eventStore = new InMemoryEventStore();
      transport = new StreamableHTTPServerTransport({
        sessionIdGenerator: () => randomUUID(),
        eventStore, // Enable resumability
        onsessioninitialized: (sessionId: string) => {
          // Store the transport by session ID when session is initialized
          // This avoids race conditions where requests might come in before the session is stored
          console.error(`Session initialized with ID: ${sessionId}`);
          transports.set(sessionId, transport);
        }
      });


      // Set up onclose handler to clean up transport when closed
      server.onclose = async () => {
        const sid = transport.sessionId;
        if (sid && transports.has(sid)) {
          console.error(`Transport closed for session ${sid}, removing from transports map`);
          transports.delete(sid);
          await cleanup();
        }
      };

      // Connect the transport to the MCP server BEFORE handling the request
      // so responses can flow back through the same transport
      await server.connect(transport);

      await transport.handleRequest(req, res);

      // Wait until initialize is complete and transport will have a sessionId
      startNotificationIntervals(transport.sessionId);

      return; // Already handled
    } else {
      // Invalid request - no session ID or not initialization request
      res.status(400).json({
        jsonrpc: '2.0',
        error: {
          code: -32000,
          message: 'Bad Request: No valid session ID provided',
        },
        id: req?.body?.id,
      });
      return;
    }

    // Handle the request with existing transport - no need to reconnect
    // The existing transport is already connected to the server
    await transport.handleRequest(req, res);
  } catch (error) {
    console.error('Error handling MCP request:', error);
    if (!res.headersSent) {
      res.status(500).json({
        jsonrpc: '2.0',
        error: {
          code: -32603,
          message: 'Internal server error',
        },
        id: req?.body?.id,
      });
      return;
    }
  }
});

// Handle GET requests for SSE streams (using built-in support from StreamableHTTP)
app.get('/mcp', async (req: Request, res: Response) => {
  console.error('Received MCP GET request');
  const sessionId = req.headers['mcp-session-id'] as string | undefined;
  if (!sessionId || !transports.has(sessionId)) {
    res.status(400).json({
      jsonrpc: '2.0',
      error: {
        code: -32000,
        message: 'Bad Request: No valid session ID provided',
      },
      id: req?.body?.id,
    });
    return;
  }

  // Check for Last-Event-ID header for resumability
  const lastEventId = req.headers['last-event-id'] as string | undefined;
  if (lastEventId) {
    console.error(`Client reconnecting with Last-Event-ID: ${lastEventId}`);
  } else {
    console.error(`Establishing new SSE stream for session ${sessionId}`);
  }

  const transport = transports.get(sessionId);
  await transport!.handleRequest(req, res);
});

// Handle DELETE requests for session termination (according to MCP spec)
app.delete('/mcp', async (req: Request, res: Response) => {
  const sessionId = req.headers['mcp-session-id'] as string | undefined;
  if (!sessionId || !transports.has(sessionId)) {
    res.status(400).json({
      jsonrpc: '2.0',
      error: {
        code: -32000,
        message: 'Bad Request: No valid session ID provided',
      },
      id: req?.body?.id,
    });
    return;
  }

  console.error(`Received session termination request for session ${sessionId}`);

  try {
    const transport = transports.get(sessionId);
    await transport!.handleRequest(req, res);
  } catch (error) {
    console.error('Error handling session termination:', error);
    if (!res.headersSent) {
      res.status(500).json({
        jsonrpc: '2.0',
        error: {
          code: -32603,
          message: 'Error handling session termination',
        },
        id: req?.body?.id,
      });
      return;
    }
  }
});

// Start the server
const PORT = process.env.PORT || 3001;
app.listen(PORT, () => {
  console.error(`MCP Streamable HTTP Server listening on port ${PORT}`);
});

// Handle server shutdown
process.on('SIGINT', async () => {
  console.error('Shutting down server...');

  // Close all active transports to properly clean up resources
  // Use Map iteration; for...in over a Map yields no entries
  for (const sessionId of transports.keys()) {
    try {
      console.error(`Closing transport for session ${sessionId}`);
      await transports.get(sessionId)!.close();
      transports.delete(sessionId);
    } catch (error) {
      console.error(`Error closing transport for session ${sessionId}:`, error);
    }
  }

  console.error('Server shutdown complete');
  process.exit(0);
});

```

## /src/everything/tsconfig.json

```json path="/src/everything/tsconfig.json" 
{
  "extends": "../../tsconfig.json",
  "compilerOptions": {
    "outDir": "./dist",
    "rootDir": "."
  },
  "include": [
    "./**/*.ts"
  ]
}

```

## /src/fetch/.python-version

```python-version path="/src/fetch/.python-version" 
3.11

```

## /src/fetch/Dockerfile

``` path="/src/fetch/Dockerfile" 
# Use a Python image with uv pre-installed
FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim AS uv

# Install the project into `/app`
WORKDIR /app

# Enable bytecode compilation
ENV UV_COMPILE_BYTECODE=1

# Copy from the cache instead of linking since it's a mounted volume
ENV UV_LINK_MODE=copy

# Install the project's dependencies using the lockfile and settings
RUN --mount=type=cache,target=/root/.cache/uv \
    --mount=type=bind,source=uv.lock,target=uv.lock \
    --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
    uv sync --frozen --no-install-project --no-dev --no-editable

# Then, add the rest of the project source code and install it
# Installing separately from its dependencies allows optimal layer caching
ADD . /app
RUN --mount=type=cache,target=/root/.cache/uv \
    uv sync --frozen --no-dev --no-editable

FROM python:3.12-slim-bookworm

WORKDIR /app
 
COPY --from=uv /root/.local /root/.local
COPY --from=uv --chown=app:app /app/.venv /app/.venv

# Place executables in the environment at the front of the path
ENV PATH="/app/.venv/bin:$PATH"

ENTRYPOINT ["mcp-server-fetch"]

```

## /src/fetch/LICENSE

``` path="/src/fetch/LICENSE" 
Copyright (c) 2024 Anthropic, PBC.

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

```

## /src/fetch/README.md

# Fetch MCP Server

A Model Context Protocol server that provides web content fetching capabilities. This server enables LLMs to retrieve and process content from web pages, converting HTML to markdown for easier consumption.

> [!CAUTION]
> This server can access local/internal IP addresses and may represent a security risk. Exercise caution when using this MCP server to ensure this does not expose any sensitive data.

The fetch tool will truncate the response, but by using the `start_index` argument, you can specify where to start the content extraction. This lets models read a webpage in chunks, until they find the information they need.
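
For example, after an initial call returns the first 5,000 characters, a client can request the next chunk by passing the offset (a sketch of the tool arguments only; the URL is illustrative and the exact call envelope depends on your client):

```json
{
  "name": "fetch",
  "arguments": {
    "url": "https://example.com/long-article",
    "max_length": 5000,
    "start_index": 5000
  }
}
```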

### Available Tools

- `fetch` - Fetches a URL from the internet and extracts its contents as markdown.
    - `url` (string, required): URL to fetch
    - `max_length` (integer, optional): Maximum number of characters to return (default: 5000)
    - `start_index` (integer, optional): Start content from this character index (default: 0)
    - `raw` (boolean, optional): Get raw content without markdown conversion (default: false)

### Prompts

- **fetch**
  - Fetch a URL and extract its contents as markdown
  - Arguments:
    - `url` (string, required): URL to fetch

## Installation

Optionally, install Node.js; this causes the fetch server to use a different, more robust HTML simplifier.

### Using uv (recommended)

When using [`uv`](https://docs.astral.sh/uv/) no specific installation is needed. We will
use [`uvx`](https://docs.astral.sh/uv/guides/tools/) to directly run *mcp-server-fetch*.

### Using PIP

Alternatively you can install `mcp-server-fetch` via pip:

```
pip install mcp-server-fetch
```

After installation, you can run it as a script using:

```
python -m mcp_server_fetch
```

## Configuration

### Configure for Claude.app

Add to your Claude settings:

<details>
<summary>Using uvx</summary>

```json
{
  "mcpServers": {
    "fetch": {
      "command": "uvx",
      "args": ["mcp-server-fetch"]
    }
  }
}
```
</details>

<details>
<summary>Using docker</summary>

```json
{
  "mcpServers": {
    "fetch": {
      "command": "docker",
      "args": ["run", "-i", "--rm", "mcp/fetch"]
    }
  }
}
```
</details>

<details>
<summary>Using pip installation</summary>

```json
{
  "mcpServers": {
    "fetch": {
      "command": "python",
      "args": ["-m", "mcp_server_fetch"]
    }
  }
}
```
</details>

### Configure for VS Code

For quick installation, use one of the one-click install buttons below...

[![Install with UV in VS Code](https://img.shields.io/badge/VS_Code-UV-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=fetch&config=%7B%22command%22%3A%22uvx%22%2C%22args%22%3A%5B%22mcp-server-fetch%22%5D%7D) [![Install with UV in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-UV-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=fetch&config=%7B%22command%22%3A%22uvx%22%2C%22args%22%3A%5B%22mcp-server-fetch%22%5D%7D&quality=insiders)

[![Install with Docker in VS Code](https://img.shields.io/badge/VS_Code-Docker-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=fetch&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22--rm%22%2C%22mcp%2Ffetch%22%5D%7D) [![Install with Docker in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-Docker-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=fetch&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22--rm%22%2C%22mcp%2Ffetch%22%5D%7D&quality=insiders)

For manual installation, add the following JSON block to your User Settings (JSON) file in VS Code. You can do this by pressing `Ctrl + Shift + P` and typing `Preferences: Open User Settings (JSON)`.

Optionally, you can add it to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others.

> Note that the `mcp` key is needed when using the `mcp.json` file.

<details>
<summary>Using uvx</summary>

```json
{
  "mcp": {
    "servers": {
      "fetch": {
        "command": "uvx",
        "args": ["mcp-server-fetch"]
      }
    }
  }
}
```
</details>

<details>
<summary>Using Docker</summary>

```json
{
  "mcp": {
    "servers": {
      "fetch": {
        "command": "docker",
        "args": ["run", "-i", "--rm", "mcp/fetch"]
      }
    }
  }
}
```
</details>

### Customization - robots.txt

By default, the server will obey a website's robots.txt file if the request came from the model (via a tool), but not if
the request was user-initiated (via a prompt). This can be disabled by adding the argument `--ignore-robots-txt` to the
`args` list in the configuration.
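
For example, using the uvx configuration from above:

```json
{
  "mcpServers": {
    "fetch": {
      "command": "uvx",
      "args": ["mcp-server-fetch", "--ignore-robots-txt"]
    }
  }
}
```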

### Customization - User-agent

By default, depending on whether the request came from the model (via a tool) or was user-initiated (via a prompt), the
server will use either the user-agent
```
ModelContextProtocol/1.0 (Autonomous; +https://github.com/modelcontextprotocol/servers)
```
or
```
ModelContextProtocol/1.0 (User-Specified; +https://github.com/modelcontextprotocol/servers)
```

This can be customized by adding the argument `--user-agent=YourUserAgent` to the `args` list in the configuration.

### Customization - Proxy

The server can be configured to use a proxy by using the `--proxy-url` argument.
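
For example, combining a custom user-agent with a proxy (both values are placeholders):

```json
{
  "mcpServers": {
    "fetch": {
      "command": "uvx",
      "args": [
        "mcp-server-fetch",
        "--user-agent=YourUserAgent",
        "--proxy-url=http://localhost:8080"
      ]
    }
  }
}
```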

## Windows Configuration

If you're experiencing timeout issues on Windows, you may need to set the `PYTHONIOENCODING` environment variable to ensure proper character encoding:

<details>
<summary>Windows configuration (uvx)</summary>

```json
{
  "mcpServers": {
    "fetch": {
      "command": "uvx",
      "args": ["mcp-server-fetch"],
      "env": {
        "PYTHONIOENCODING": "utf-8"
      }
    }
  }
}
```
</details>

<details>
<summary>Windows configuration (pip)</summary>

```json
{
  "mcpServers": {
    "fetch": {
      "command": "python",
      "args": ["-m", "mcp_server_fetch"],
      "env": {
        "PYTHONIOENCODING": "utf-8"
      }
    }
  }
}
```
</details>

This addresses character encoding issues that can cause the server to timeout on Windows systems.

## Debugging

You can use the MCP inspector to debug the server. For uvx installations:

```
npx @modelcontextprotocol/inspector uvx mcp-server-fetch
```

Or if you've installed the package in a specific directory or are developing on it:

```
cd path/to/servers/src/fetch
npx @modelcontextprotocol/inspector uv run mcp-server-fetch
```

## Contributing

We encourage contributions to help expand and improve mcp-server-fetch. Whether you want to add new tools, enhance existing functionality, or improve documentation, your input is valuable.

For examples of other MCP servers and implementation patterns, see:
https://github.com/modelcontextprotocol/servers

Pull requests are welcome! Feel free to contribute new ideas, bug fixes, or enhancements to make mcp-server-fetch even more powerful and useful.

## License

mcp-server-fetch is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository.


## /src/fetch/pyproject.toml

```toml path="/src/fetch/pyproject.toml" 
[project]
name = "mcp-server-fetch"
version = "0.6.3"
description = "A Model Context Protocol server providing tools to fetch and convert web content for usage by LLMs"
readme = "README.md"
requires-python = ">=3.10"
authors = [{ name = "Anthropic, PBC." }]
maintainers = [{ name = "Jack Adamson", email = "jadamson@anthropic.com" }]
keywords = ["http", "mcp", "llm", "automation"]
license = { text = "MIT" }
classifiers = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.10",
]
dependencies = [
    "httpx<0.28",
    "markdownify>=0.13.1",
    "mcp>=1.1.3",
    "protego>=0.3.1",
    "pydantic>=2.0.0",
    "readabilipy>=0.2.0",
    "requests>=2.32.3",
]

[project.scripts]
mcp-server-fetch = "mcp_server_fetch:main"

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[tool.uv]
dev-dependencies = ["pyright>=1.1.389", "ruff>=0.7.3"]

```
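
Given the `[project.scripts]` entry above, the server can also be launched from a checkout with uv, mirroring the Debugging section:

```
cd src/fetch
uv run mcp-server-fetch
```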

## /src/fetch/src/mcp_server_fetch/__init__.py

```py path="/src/fetch/src/mcp_server_fetch/__init__.py" 
from .server import serve


def main():
    """MCP Fetch Server - HTTP fetching functionality for MCP"""
    import argparse
    import asyncio

    parser = argparse.ArgumentParser(
        description="give a model the ability to make web requests"
    )
    parser.add_argument("--user-agent", type=str, help="Custom User-Agent string")
    parser.add_argument(
        "--ignore-robots-txt",
        action="store_true",
        help="Ignore robots.txt restrictions",
    )
    parser.add_argument("--proxy-url", type=str, help="Proxy URL to use for requests")

    args = parser.parse_args()
    asyncio.run(serve(args.user_agent, args.ignore_robots_txt, args.proxy_url))


if __name__ == "__main__":
    main()

```

## /src/fetch/src/mcp_server_fetch/__main__.py

```py path="/src/fetch/src/mcp_server_fetch/__main__.py" 
# __main__.py

from mcp_server_fetch import main

main()

```

## /src/fetch/src/mcp_server_fetch/server.py

```py path="/src/fetch/src/mcp_server_fetch/server.py" 
from typing import Annotated, Tuple
from urllib.parse import urlparse, urlunparse

import markdownify
import readabilipy.simple_json
from mcp.shared.exceptions import McpError
from mcp.server import Server
from mcp.server.stdio import stdio_server
from mcp.types import (
    ErrorData,
    GetPromptResult,
    Prompt,
    PromptArgument,
    PromptMessage,
    TextContent,
    Tool,
    INVALID_PARAMS,
    INTERNAL_ERROR,
)
from protego import Protego
from pydantic import BaseModel, Field, AnyUrl

DEFAULT_USER_AGENT_AUTONOMOUS = "ModelContextProtocol/1.0 (Autonomous; +https://github.com/modelcontextprotocol/servers)"
DEFAULT_USER_AGENT_MANUAL = "ModelContextProtocol/1.0 (User-Specified; +https://github.com/modelcontextprotocol/servers)"


def extract_content_from_html(html: str) -> str:
    """Extract and convert HTML content to Markdown format.

    Args:
        html: Raw HTML content to process

    Returns:
        Simplified markdown version of the content
    """
    ret = readabilipy.simple_json.simple_json_from_html_string(
        html, use_readability=True
    )
    if not ret["content"]:
        return "<error>Page failed to be simplified from HTML</error>"
    content = markdownify.markdownify(
        ret["content"],
        heading_style=markdownify.ATX,
    )
    return content


def get_robots_txt_url(url: str) -> str:
    """Get the robots.txt URL for a given website URL.

    Args:
        url: Website URL to get robots.txt for

    Returns:
        URL of the robots.txt file
    """
    # Parse the URL into components
    parsed = urlparse(url)

    # Reconstruct the base URL with just scheme, netloc, and /robots.txt path
    robots_url = urlunparse((parsed.scheme, parsed.netloc, "/robots.txt", "", "", ""))

    return robots_url


async def check_may_autonomously_fetch_url(url: str, user_agent: str, proxy_url: str | None = None) -> None:
    """
    Check if the URL can be fetched by the user agent according to the robots.txt file.
    Raises a McpError if not.
    """
    from httpx import AsyncClient, HTTPError

    robot_txt_url = get_robots_txt_url(url)

    async with AsyncClient(proxies=proxy_url) as client:
        try:
            response = await client.get(
                robot_txt_url,
                follow_redirects=True,
                headers={"User-Agent": user_agent},
            )
        except HTTPError:
            raise McpError(ErrorData(
                code=INTERNAL_ERROR,
                message=f"Failed to fetch robots.txt {robot_txt_url} due to a connection issue",
            ))
        if response.status_code in (401, 403):
            raise McpError(ErrorData(
                code=INTERNAL_ERROR,
                message=f"When fetching robots.txt ({robot_txt_url}), received status {response.status_code} so assuming that autonomous fetching is not allowed, the user can try manually fetching by using the fetch prompt",
            ))
        elif 400 <= response.status_code < 500:
            return
        robot_txt = response.text
    processed_robot_txt = "\n".join(
        line for line in robot_txt.splitlines() if not line.strip().startswith("#")
    )
    robot_parser = Protego.parse(processed_robot_txt)
    if not robot_parser.can_fetch(str(url), user_agent):
        raise McpError(ErrorData(
            code=INTERNAL_ERROR,
            message=f"The sites robots.txt ({robot_txt_url}), specifies that autonomous fetching of this page is not allowed, "
            f"<useragent>{user_agent}</useragent>\n"
            f"<url>{url}</url>"
            f"<robots>\n{robot_txt}\n</robots>\n"
            f"The assistant must let the user know that it failed to view the page. The assistant may provide further guidance based on the above information.\n"
            f"The assistant can tell the user that they can try manually fetching the page by using the fetch prompt within their UI.",
        ))


async def fetch_url(
    url: str, user_agent: str, force_raw: bool = False, proxy_url: str | None = None
) -> Tuple[str, str]:
    """
    Fetch the URL and return the content in a form ready for the LLM, as well as a prefix string with status information.
    """
    from httpx import AsyncClient, HTTPError

    async with AsyncClient(proxies=proxy_url) as client:
        try:
            response = await client.get(
                url,
                follow_redirects=True,
                headers={"User-Agent": user_agent},
                timeout=30,
            )
        except HTTPError as e:
            raise McpError(ErrorData(code=INTERNAL_ERROR, message=f"Failed to fetch {url}: {e!r}"))
        if response.status_code >= 400:
            raise McpError(ErrorData(
                code=INTERNAL_ERROR,
                message=f"Failed to fetch {url} - status code {response.status_code}",
            ))

        page_raw = response.text

    content_type = response.headers.get("content-type", "")
    is_page_html = (
        "<html" in page_raw[:100] or "text/html" in content_type or not content_type
    )

    if is_page_html and not force_raw:
        return extract_content_from_html(page_raw), ""

    return (
        page_raw,
        f"Content type {content_type} cannot be simplified to markdown, but here is the raw content:\n",
    )


class Fetch(BaseModel):
    """Parameters for fetching a URL."""

    url: Annotated[AnyUrl, Field(description="URL to fetch")]
    max_length: Annotated[
        int,
        Field(
            default=5000,
            description="Maximum number of characters to return.",
            gt=0,
            lt=1000000,
        ),
    ]
    start_index: Annotated[
        int,
        Field(
            default=0,
            description="On return output starting at this character index, useful if a previous fetch was truncated and more context is required.",
            ge=0,
        ),
    ]
    raw: Annotated[
        bool,
        Field(
            default=False,
            description="Get the actual HTML content of the requested page, without simplification.",
        ),
    ]


async def serve(
    custom_user_agent: str | None = None,
    ignore_robots_txt: bool = False,
    proxy_url: str | None = None,
) -> None:
    """Run the fetch MCP server.

    Args:
        custom_user_agent: Optional custom User-Agent string to use for requests
        ignore_robots_txt: Whether to ignore robots.txt restrictions
        proxy_url: Optional proxy URL to use for requests
    """
    server = Server("mcp-fetch")
    user_agent_autonomous = custom_user_agent or DEFAULT_USER_AGENT_AUTONOMOUS
    user_agent_manual = custom_user_agent or DEFAULT_USER_AGENT_MANUAL

    @server.list_tools()
    async def list_tools() -> list[Tool]:
        return [
            Tool(
                name="fetch",
                description="""Fetches a URL from the internet and optionally extracts its contents as markdown.

Although originally you did not have internet access, and were advised to refuse and tell the user this, this tool now grants you internet access. Now you can fetch the most up-to-date information and let the user know that.""",
                inputSchema=Fetch.model_json_schema(),
            )
        ]

    @server.list_prompts()
    async def list_prompts() -> list[Prompt]:
        return [
            Prompt(
                name="fetch",
                description="Fetch a URL and extract its contents as markdown",
                arguments=[
                    PromptArgument(
                        name="url", description="URL to fetch", required=True
                    )
                ],
            )
        ]

    @server.call_tool()
    async def call_tool(name, arguments: dict) -> list[TextContent]:
        try:
            args = Fetch(**arguments)
        except ValueError as e:
            raise McpError(ErrorData(code=INVALID_PARAMS, message=str(e)))

        url = str(args.url)
        if not url:
            raise McpError(ErrorData(code=INVALID_PARAMS, message="URL is required"))

        if not ignore_robots_txt:
            await check_may_autonomously_fetch_url(url, user_agent_autonomous, proxy_url)

        content, prefix = await fetch_url(
            url, user_agent_autonomous, force_raw=args.raw, proxy_url=proxy_url
        )
        original_length = len(content)
        if args.start_index >= original_length:
            content = "<error>No more content available.</error>"
        else:
            truncated_content = content[args.start_index : args.start_index + args.max_length]
            if not truncated_content:
                content = "<error>No more content available.</error>"
            else:
                content = truncated_content
                actual_content_length = len(truncated_content)
                remaining_content = original_length - (args.start_index + actual_content_length)
                # Only add the prompt to continue fetching if there is still remaining content
                if actual_content_length == args.max_length and remaining_content > 0:
                    next_start = args.start_index + actual_content_length
                    content += f"\n\n<error>Content truncated. Call the fetch tool with a start_index of {next_start} to get more content.</error>"
        return [TextContent(type="text", text=f"{prefix}Contents of {url}:\n{content}")]

    @server.get_prompt()
    async def get_prompt(name: str, arguments: dict | None) -> GetPromptResult:
        if not arguments or "url" not in arguments:
            raise McpError(ErrorData(code=INVALID_PARAMS, message="URL is required"))

        url = arguments["url"]

        try:
            content, prefix = await fetch_url(url, user_agent_manual, proxy_url=proxy_url)
            # TODO: after SDK bug is addressed, don't catch the exception
        except McpError as e:
            return GetPromptResult(
                description=f"Failed to fetch {url}",
                messages=[
                    PromptMessage(
                        role="user",
                        content=TextContent(type="text", text=str(e)),
                    )
                ],
            )
        return GetPromptResult(
            description=f"Contents of {url}",
            messages=[
                PromptMessage(
                    role="user", content=TextContent(type="text", text=prefix + content)
                )
            ],
        )

    options = server.create_initialization_options()
    async with stdio_server() as (read_stream, write_stream):
        await server.run(read_stream, write_stream, options, raise_exceptions=True)

```
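
The `call_tool` handler above windows long pages with `start_index`/`max_length` and appends a continuation hint when content remains. A minimal standalone sketch of that windowing arithmetic (the function name is hypothetical):

```python
def window_content(content: str, start_index: int, max_length: int) -> str:
    """Slice content the way call_tool does and append a continuation
    hint when more content remains past the returned window."""
    original_length = len(content)
    if start_index >= original_length:
        return "<error>No more content available.</error>"
    chunk = content[start_index : start_index + max_length]
    if not chunk:
        return "<error>No more content available.</error>"
    remaining = original_length - (start_index + len(chunk))
    # Only prompt for continuation when the window is full and content remains
    if len(chunk) == max_length and remaining > 0:
        next_start = start_index + len(chunk)
        chunk += (
            f"\n\n<error>Content truncated. Call the fetch tool with a "
            f"start_index of {next_start} to get more content.</error>"
        )
    return chunk
```

For a 12,000-character page, `window_content(page, 0, 5000)` returns the first 5,000 characters plus a hint pointing at `start_index=5000`; a second call with that index returns the next window.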

## /src/filesystem/Dockerfile

``` path="/src/filesystem/Dockerfile" 
FROM node:22.12-alpine AS builder

WORKDIR /app

COPY src/filesystem /app
COPY tsconfig.json /tsconfig.json

RUN --mount=type=cache,target=/root/.npm npm install

RUN --mount=type=cache,target=/root/.npm-production npm ci --ignore-scripts --omit-dev


FROM node:22-alpine AS release

WORKDIR /app

COPY --from=builder /app/dist /app/dist
COPY --from=builder /app/package.json /app/package.json
COPY --from=builder /app/package-lock.json /app/package-lock.json

ENV NODE_ENV=production

RUN npm ci --ignore-scripts --omit-dev

ENTRYPOINT ["node", "/app/dist/index.js"]
```

## /src/filesystem/README.md

# Filesystem MCP Server

Node.js server implementing Model Context Protocol (MCP) for filesystem operations.

## Features

- Read/write files
- Create/list/delete directories
- Move files/directories
- Search files
- Get file metadata
- Dynamic directory access control via [Roots](https://modelcontextprotocol.io/docs/learn/client-concepts#roots)

## Directory Access Control

The server uses a flexible directory access control system. Directories can be specified via command-line arguments or dynamically via [Roots](https://modelcontextprotocol.io/docs/learn/client-concepts#roots).

### Method 1: Command-line Arguments
Specify allowed directories when starting the server:
```bash
mcp-server-filesystem /path/to/dir1 /path/to/dir2
```

### Method 2: MCP Roots (Recommended)
MCP clients that support [Roots](https://modelcontextprotocol.io/docs/learn/client-concepts#roots) can dynamically update the allowed directories.

Roots supplied by the client completely replace any server-side allowed directories when provided.

**Important**: If the server starts without command-line arguments AND the client doesn't support the roots protocol (or provides empty roots), the server will throw an error during initialization.

This is the recommended method, as it enables runtime directory updates via `roots/list_changed` notifications without a server restart, providing a more flexible and modern integration experience.

### How It Works

The server's directory access control follows this flow:

1. **Server Startup**
   - Server starts with directories from command-line arguments (if provided)
   - If no arguments provided, server starts with empty allowed directories

2. **Client Connection & Initialization**
   - Client connects and sends `initialize` request with capabilities
   - Server checks if client supports roots protocol (`capabilities.roots`)
   
3. **Roots Protocol Handling** (if client supports roots)
   - **On initialization**: Server requests roots from client via `roots/list`
   - Client responds with its configured roots
   - Server replaces ALL allowed directories with client's roots
   - **On runtime updates**: Client can send `notifications/roots/list_changed`
   - Server requests updated roots and replaces allowed directories again

4. **Fallback Behavior** (if client doesn't support roots)
   - Server continues using command-line directories only
   - No dynamic updates possible

5. **Access Control**
   - All filesystem operations are restricted to allowed directories
   - Use `list_allowed_directories` tool to see current directories
   - Server requires at least ONE allowed directory to operate

**Note**: The server will only allow operations within directories specified either via `args` or via Roots.
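
For illustration, a client that supports roots answers the server's `roots/list` request with a result shaped like this (paths are placeholders):

```json
{
  "roots": [
    { "uri": "file:///home/user/project", "name": "Project" },
    { "uri": "file:///home/user/docs", "name": "Docs" }
  ]
}
```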



## API

### Tools

- **read_text_file**
  - Read complete contents of a file as text
  - Inputs:
    - `path` (string)
    - `head` (number, optional): First N lines
    - `tail` (number, optional): Last N lines
  - Always treats the file as UTF-8 text regardless of extension
  - Cannot specify both `head` and `tail` simultaneously

- **read_media_file**
  - Read an image or audio file
  - Inputs:
    - `path` (string)
  - Streams the file and returns base64 data with the corresponding MIME type

- **read_multiple_files**
  - Read multiple files simultaneously
  - Input: `paths` (string[])
  - Failed reads won't stop the entire operation

- **write_file**
  - Create new file or overwrite existing (exercise caution with this)
  - Inputs:
    - `path` (string): File location
    - `content` (string): File content

- **edit_file**
  - Make selective edits using advanced pattern matching and formatting
  - Features:
    - Line-based and multi-line content matching
    - Whitespace normalization with indentation preservation
    - Multiple simultaneous edits with correct positioning
    - Indentation style detection and preservation
    - Git-style diff output with context
    - Preview changes with dry run mode
  - Inputs:
    - `path` (string): File to edit
    - `edits` (array): List of edit operations
      - `oldText` (string): Text to search for (can be substring)
      - `newText` (string): Text to replace with
    - `dryRun` (boolean): Preview changes without applying (default: false)
  - Returns detailed diff and match information for dry runs, otherwise applies changes
  - Best Practice: Always use `dryRun` first to preview changes before applying them (see the example after this tool list)

- **create_directory**
  - Create new directory or ensure it exists
  - Input: `path` (string)
  - Creates parent directories if needed
  - Succeeds silently if directory exists

- **list_directory**
  - List directory contents with [FILE] or [DIR] prefixes
  - Input: `path` (string)

- **list_directory_with_sizes**
  - List directory contents with [FILE] or [DIR] prefixes, including file sizes
  - Inputs:
    - `path` (string): Directory path to list
    - `sortBy` (string, optional): Sort entries by "name" or "size" (default: "name")
  - Returns detailed listing with file sizes and summary statistics
  - Shows total files, directories, and combined size

- **move_file**
  - Move or rename files and directories
  - Inputs:
    - `source` (string)
    - `destination` (string)
  - Fails if destination exists

- **search_files**
  - Recursively search for files/directories that match or do not match patterns
  - Inputs:
    - `path` (string): Starting directory
    - `pattern` (string): Search pattern
    - `excludePatterns` (string[]): Patterns to exclude. Glob formats are supported.
  - Glob-style pattern matching
  - Returns full paths to matches

- **directory_tree**
  - Get recursive JSON tree structure of directory contents
  - Inputs:
    - `path` (string): Starting directory
    - `excludePatterns` (string[]): Patterns to exclude. Glob formats are supported.
  - Returns:
    - JSON array where each entry contains:
      - `name` (string): File/directory name
      - `type` ('file'|'directory'): Entry type
      - `children` (array): Present only for directories
        - Empty array for empty directories
        - Omitted for files
  - Output is formatted with 2-space indentation for readability
    
- **get_file_info**
  - Get detailed file/directory metadata
  - Input: `path` (string)
  - Returns:
    - Size
    - Creation time
    - Modified time
    - Access time
    - Type (file/directory)
    - Permissions

- **list_allowed_directories**
  - List all directories the server is allowed to access
  - No input required
  - Returns:
    - Directories that this server can read/write from

## Usage with Claude Desktop
Add this to your `claude_desktop_config.json`:

Note: you can provide sandboxed directories to the server by mounting them to `/projects`. Adding the `ro` flag will make the directory read-only to the server.

### Docker
Note: all directories must be mounted to `/projects` by default.

```json
{
  "mcpServers": {
    "filesystem": {
      "command": "docker",
      "args": [
        "run",
        "-i",
        "--rm",
        "--mount", "type=bind,src=/Users/username/Desktop,dst=/projects/Desktop",
        "--mount", "type=bind,src=/path/to/other/allowed/dir,dst=/projects/other/allowed/dir,ro",
        "--mount", "type=bind,src=/path/to/file.txt,dst=/projects/path/to/file.txt",
        "mcp/filesystem",
        "/projects"
      ]
    }
  }
}
```

### NPX

```json
{
  "mcpServers": {
    "filesystem": {
      "command": "npx",
      "args": [
        "-y",
        "@modelcontextprotocol/server-filesystem",
        "/Users/username/Desktop",
        "/path/to/other/allowed/dir"
      ]
    }
  }
}
```

## Usage with VS Code

For quick installation, click one of the installation buttons below:

[![Install with NPX in VS Code](https://img.shields.io/badge/VS_Code-NPM-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=filesystem&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%40modelcontextprotocol%2Fserver-filesystem%22%2C%22%24%7BworkspaceFolder%7D%22%5D%7D) [![Install with NPX in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-NPM-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=filesystem&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%40modelcontextprotocol%2Fserver-filesystem%22%2C%22%24%7BworkspaceFolder%7D%22%5D%7D&quality=insiders)

[![Install with Docker in VS Code](https://img.shields.io/badge/VS_Code-Docker-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=filesystem&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22--rm%22%2C%22--mount%22%2C%22type%3Dbind%2Csrc%3D%24%7BworkspaceFolder%7D%2Cdst%3D%2Fprojects%2Fworkspace%22%2C%22mcp%2Ffilesystem%22%2C%22%2Fprojects%22%5D%7D) [![Install with Docker in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-Docker-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=filesystem&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22--rm%22%2C%22--mount%22%2C%22type%3Dbind%2Csrc%3D%24%7BworkspaceFolder%7D%2Cdst%3D%2Fprojects%2Fworkspace%22%2C%22mcp%2Ffilesystem%22%2C%22%2Fprojects%22%5D%7D&quality=insiders)

For manual installation, you can configure the MCP server using one of these methods:

**Method 1: User Configuration (Recommended)**
Add the configuration to your user-level MCP configuration file. Open the Command Palette (`Ctrl + Shift + P`) and run `MCP: Open User Configuration`. This will open your user `mcp.json` file where you can add the server configuration.

**Method 2: Workspace Configuration**
Alternatively, you can add the configuration to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others.

> For more details about MCP configuration in VS Code, see the [official VS Code MCP documentation](https://code.visualstudio.com/docs/copilot/mcp).

You can provide sandboxed directories to the server by mounting them to `/projects`. Adding the `ro` flag will make the directory read-only to the server.

### Docker
Note: all directories must be mounted to `/projects` by default. 

```json
{
  "servers": {
    "filesystem": {
      "command": "docker",
      "args": [
        "run",
        "-i",
        "--rm",
        "--mount", "type=bind,src=${workspaceFolder},dst=/projects/workspace",
        "mcp/filesystem",
        "/projects"
      ]
    }
  }
}
```

### NPX

```json
{
  "servers": {
    "filesystem": {
      "command": "npx",
      "args": [
        "-y",
        "@modelcontextprotocol/server-filesystem",
        "${workspaceFolder}"
      ]
    }
  }
}
```

## Build

Docker build:

```bash
docker build -t mcp/filesystem -f src/filesystem/Dockerfile .
```

## License

This MCP server is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository.


## /src/filesystem/__tests__/directory-tree.test.ts

```ts path="/src/filesystem/__tests__/directory-tree.test.ts" 
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import * as fs from 'fs/promises';
import * as path from 'path';
import * as os from 'os';

// We need to test the buildTree function, but it's defined inside the request handler
// So we'll extract the core logic into a testable function
import { minimatch } from 'minimatch';

interface TreeEntry {
    name: string;
    type: 'file' | 'directory';
    children?: TreeEntry[];
}

async function buildTreeForTesting(currentPath: string, rootPath: string, excludePatterns: string[] = []): Promise<TreeEntry[]> {
    const entries = await fs.readdir(currentPath, {withFileTypes: true});
    const result: TreeEntry[] = [];

    for (const entry of entries) {
        const relativePath = path.relative(rootPath, path.join(currentPath, entry.name));
        const shouldExclude = excludePatterns.some(pattern => {
            if (pattern.includes('*')) {
                return minimatch(relativePath, pattern, {dot: true});
            }
            // For files: match exact name or as part of path
            // For directories: match as directory path
            return minimatch(relativePath, pattern, {dot: true}) ||
                   minimatch(relativePath, `**/${pattern}`, {dot: true}) ||
                   minimatch(relativePath, `**/${pattern}/**`, {dot: true});
        });
        if (shouldExclude)
            continue;

        const entryData: TreeEntry = {
            name: entry.name,
            type: entry.isDirectory() ? 'directory' : 'file'
        };

        if (entry.isDirectory()) {
            const subPath = path.join(currentPath, entry.name);
            entryData.children = await buildTreeForTesting(subPath, rootPath, excludePatterns);
        }

        result.push(entryData);
    }

    return result;
}

describe('buildTree exclude patterns', () => {
    let testDir: string;

    beforeEach(async () => {
        testDir = await fs.mkdtemp(path.join(os.tmpdir(), 'filesystem-test-'));
        
        // Create test directory structure
        await fs.mkdir(path.join(testDir, 'src'));
        await fs.mkdir(path.join(testDir, 'node_modules'));
        await fs.mkdir(path.join(testDir, '.git'));
        await fs.mkdir(path.join(testDir, 'nested', 'node_modules'), { recursive: true });
        
        // Create test files
        await fs.writeFile(path.join(testDir, '.env'), 'SECRET=value');
        await fs.writeFile(path.join(testDir, '.env.local'), 'LOCAL_SECRET=value');
        await fs.writeFile(path.join(testDir, 'src', 'index.js'), 'console.log("hello");');
        await fs.writeFile(path.join(testDir, 'package.json'), '{}');
        await fs.writeFile(path.join(testDir, 'node_modules', 'module.js'), 'module.exports = {};');
        await fs.writeFile(path.join(testDir, 'nested', 'node_modules', 'deep.js'), 'module.exports = {};');
    });

    afterEach(async () => {
        await fs.rm(testDir, { recursive: true, force: true });
    });

    it('should exclude files matching simple patterns', async () => {
        // Test the current implementation - this will fail until the bug is fixed
        const tree = await buildTreeForTesting(testDir, testDir, ['.env']);
        const fileNames = tree.map(entry => entry.name);
        
        expect(fileNames).not.toContain('.env');
        expect(fileNames).toContain('.env.local'); // Should not exclude this
        expect(fileNames).toContain('src');
        expect(fileNames).toContain('package.json');
    });

    it('should exclude directories matching simple patterns', async () => {
        const tree = await buildTreeForTesting(testDir, testDir, ['node_modules']);
        const dirNames = tree.map(entry => entry.name);
        
        expect(dirNames).not.toContain('node_modules');
        expect(dirNames).toContain('src');
        expect(dirNames).toContain('.git');
    });

    it('should exclude nested directories with same pattern', async () => {
        const tree = await buildTreeForTesting(testDir, testDir, ['node_modules']);
        
        // Find the nested directory
        const nestedDir = tree.find(entry => entry.name === 'nested');
        expect(nestedDir).toBeDefined();
        expect(nestedDir!.children).toBeDefined();
        
        // The nested/node_modules should also be excluded
        const nestedChildren = nestedDir!.children!.map(child => child.name);
        expect(nestedChildren).not.toContain('node_modules');
    });

    it('should handle glob patterns correctly', async () => {
        const tree = await buildTreeForTesting(testDir, testDir, ['*.env']);
        const fileNames = tree.map(entry => entry.name);
        
        expect(fileNames).not.toContain('.env');
        expect(fileNames).toContain('.env.local'); // *.env should not match .env.local
        expect(fileNames).toContain('src');
    });

    it('should handle dot files correctly', async () => {
        const tree = await buildTreeForTesting(testDir, testDir, ['.git']);
        const dirNames = tree.map(entry => entry.name);
        
        expect(dirNames).not.toContain('.git');
        expect(dirNames).toContain('.env'); // Should not exclude this
    });

    it('should work with multiple exclude patterns', async () => {
        const tree = await buildTreeForTesting(testDir, testDir, ['node_modules', '.env', '.git']);
        const entryNames = tree.map(entry => entry.name);
        
        expect(entryNames).not.toContain('node_modules');
        expect(entryNames).not.toContain('.env');
        expect(entryNames).not.toContain('.git');
        expect(entryNames).toContain('src');
        expect(entryNames).toContain('package.json');
    });

    it('should handle empty exclude patterns', async () => {
        const tree = await buildTreeForTesting(testDir, testDir, []);
        const entryNames = tree.map(entry => entry.name);
        
        // All entries should be included
        expect(entryNames).toContain('node_modules');
        expect(entryNames).toContain('.env');
        expect(entryNames).toContain('.git');
        expect(entryNames).toContain('src');
    });
});
```

## /src/filesystem/__tests__/lib.test.ts

```ts path="/src/filesystem/__tests__/lib.test.ts" 
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import fs from 'fs/promises';
import path from 'path';
import os from 'os';
import {
  // Pure utility functions
  formatSize,
  normalizeLineEndings,
  createUnifiedDiff,
  // Security & validation functions
  validatePath,
  setAllowedDirectories,
  // File operations
  getFileStats,
  readFileContent,
  writeFileContent,
  // Search & filtering functions
  searchFilesWithValidation,
  // File editing functions
  applyFileEdits,
  tailFile,
  headFile
} from '../lib.js';

// Mock fs module
vi.mock('fs/promises');
const mockFs = fs as any;

describe('Lib Functions', () => {
  beforeEach(() => {
    vi.clearAllMocks();
    // Set up allowed directories for tests
    const allowedDirs = process.platform === 'win32' ? ['C:\\Users\\test', 'C:\\temp', 'C:\\allowed'] : ['/home/user', '/tmp', '/allowed'];
    setAllowedDirectories(allowedDirs);
  });

  afterEach(() => {
    vi.restoreAllMocks();
    // Clear allowed directories after tests
    setAllowedDirectories([]);
  });

  describe('Pure Utility Functions', () => {
    describe('formatSize', () => {
      it('formats bytes correctly', () => {
        expect(formatSize(0)).toBe('0 B');
        expect(formatSize(512)).toBe('512 B');
        expect(formatSize(1024)).toBe('1.00 KB');
        expect(formatSize(1536)).toBe('1.50 KB');
        expect(formatSize(1048576)).toBe('1.00 MB');
        expect(formatSize(1073741824)).toBe('1.00 GB');
        expect(formatSize(1099511627776)).toBe('1.00 TB');
      });

      it('handles edge cases', () => {
        expect(formatSize(1023)).toBe('1023 B');
        expect(formatSize(1025)).toBe('1.00 KB');
        expect(formatSize(1048575)).toBe('1024.00 KB');
      });

      it('handles very large numbers beyond TB', () => {
        // The function only supports up to TB, so very large numbers will show as TB
        expect(formatSize(1024 * 1024 * 1024 * 1024 * 1024)).toBe('1024.00 TB');
        expect(formatSize(Number.MAX_SAFE_INTEGER)).toContain('TB');
      });

      it('handles negative numbers', () => {
        // Negative numbers will result in NaN for the log calculation
        expect(formatSize(-1024)).toContain('NaN');
        expect(formatSize(-0)).toBe('0 B');
      });

      it('handles decimal numbers', () => {
        expect(formatSize(1536.5)).toBe('1.50 KB');
        expect(formatSize(1023.9)).toBe('1023.9 B');
      });

      it('handles very small positive numbers', () => {
        expect(formatSize(1)).toBe('1 B');
        expect(formatSize(0.5)).toBe('0.5 B');
        expect(formatSize(0.1)).toBe('0.1 B');
      });
    });

    describe('normalizeLineEndings', () => {
      it('converts CRLF to LF', () => {
        expect(normalizeLineEndings('line1\r\nline2\r\nline3')).toBe('line1\nline2\nline3');
      });

      it('leaves LF unchanged', () => {
        expect(normalizeLineEndings('line1\nline2\nline3')).toBe('line1\nline2\nline3');
      });

      it('handles mixed line endings', () => {
        expect(normalizeLineEndings('line1\r\nline2\nline3\r\n')).toBe('line1\nline2\nline3\n');
      });

      it('handles empty string', () => {
        expect(normalizeLineEndings('')).toBe('');
      });
    });

    describe('createUnifiedDiff', () => {
      it('creates diff for simple changes', () => {
        const original = 'line1\nline2\nline3';
        const modified = 'line1\nmodified line2\nline3';
        const diff = createUnifiedDiff(original, modified, 'test.txt');
        
        expect(diff).toContain('--- test.txt');
        expect(diff).toContain('+++ test.txt');
        expect(diff).toContain('-line2');
        expect(diff).toContain('+modified line2');
      });

      it('handles CRLF normalization', () => {
        const original = 'line1\r\nline2\r\n';
        const modified = 'line1\nmodified line2\n';
        const diff = createUnifiedDiff(original, modified);
        
        expect(diff).toContain('-line2');
        expect(diff).toContain('+modified line2');
      });

      it('handles identical content', () => {
        const content = 'line1\nline2\nline3';
        const diff = createUnifiedDiff(content, content);
        
        // Should not contain any +/- lines for identical content (excluding header lines)
        expect(diff.split('\n').filter((line: string) => line.startsWith('+++') || line.startsWith('---'))).toHaveLength(2);
        expect(diff.split('\n').filter((line: string) => line.startsWith('+') && !line.startsWith('+++'))).toHaveLength(0);
        expect(diff.split('\n').filter((line: string) => line.startsWith('-') && !line.startsWith('---'))).toHaveLength(0);
      });

      it('handles empty content', () => {
        const diff = createUnifiedDiff('', '');
        expect(diff).toContain('--- file');
        expect(diff).toContain('+++ file');
      });

      it('handles default filename parameter', () => {
        const diff = createUnifiedDiff('old', 'new');
        expect(diff).toContain('--- file');
        expect(diff).toContain('+++ file');
      });

      it('handles custom filename', () => {
        const diff = createUnifiedDiff('old', 'new', 'custom.txt');
        expect(diff).toContain('--- custom.txt');
        expect(diff).toContain('+++ custom.txt');
      });
    });
  });

  describe('Security & Validation Functions', () => {
    describe('validatePath', () => {
      // Use Windows-compatible paths for testing
      const allowedDirs = process.platform === 'win32' ? ['C:\\Users\\test', 'C:\\temp'] : ['/home/user', '/tmp'];

      beforeEach(() => {
        mockFs.realpath.mockImplementation(async (path: any) => path.toString());
      });

      it('validates allowed paths', async () => {
        const testPath = process.platform === 'win32' ? 'C:\\Users\\test\\file.txt' : '/home/user/file.txt';
        const result = await validatePath(testPath);
        expect(result).toBe(testPath);
      });

      it('rejects disallowed paths', async () => {
        const testPath = process.platform === 'win32' ? 'C:\\Windows\\System32\\file.txt' : '/etc/passwd';
        await expect(validatePath(testPath))
          .rejects.toThrow('Access denied - path outside allowed directories');
      });

      it('handles non-existent files by checking parent directory', async () => {
        const newFilePath = process.platform === 'win32' ? 'C:\\Users\\test\\newfile.txt' : '/home/user/newfile.txt';
        const parentPath = process.platform === 'win32' ? 'C:\\Users\\test' : '/home/user';
        
        // Create an error with the ENOENT code that the implementation checks for
        const enoentError = new Error('ENOENT') as NodeJS.ErrnoException;
        enoentError.code = 'ENOENT';
        
        mockFs.realpath
          .mockRejectedValueOnce(enoentError)
          .mockResolvedValueOnce(parentPath);
        
        const result = await validatePath(newFilePath);
        expect(result).toBe(path.resolve(newFilePath));
      });

      it('rejects when parent directory does not exist', async () => {
        const newFilePath = process.platform === 'win32' ? 'C:\\Users\\test\\nonexistent\\newfile.txt' : '/home/user/nonexistent/newfile.txt';
        
        // Create errors with the ENOENT code
        const enoentError1 = new Error('ENOENT') as NodeJS.ErrnoException;
        enoentError1.code = 'ENOENT';
        const enoentError2 = new Error('ENOENT') as NodeJS.ErrnoException;
        enoentError2.code = 'ENOENT';
        
        mockFs.realpath
          .mockRejectedValueOnce(enoentError1)
          .mockRejectedValueOnce(enoentError2);
        
        await expect(validatePath(newFilePath))
          .rejects.toThrow('Parent directory does not exist');
      });
    });
  });

  describe('File Operations', () => {
    describe('getFileStats', () => {
      it('returns file statistics', async () => {
        const mockStats = {
          size: 1024,
          birthtime: new Date('2023-01-01'),
          mtime: new Date('2023-01-02'),
          atime: new Date('2023-01-03'),
          isDirectory: () => false,
          isFile: () => true,
          mode: 0o644
        };
        
        mockFs.stat.mockResolvedValueOnce(mockStats as any);
        
        const result = await getFileStats('/test/file.txt');
        
        expect(result).toEqual({
          size: 1024,
          created: new Date('2023-01-01'),
          modified: new Date('2023-01-02'),
          accessed: new Date('2023-01-03'),
          isDirectory: false,
          isFile: true,
          permissions: '644'
        });
      });

      it('handles directory statistics', async () => {
        const mockStats = {
          size: 4096,
          birthtime: new Date('2023-01-01'),
          mtime: new Date('2023-01-02'),
          atime: new Date('2023-01-03'),
          isDirectory: () => true,
          isFile: () => false,
          mode: 0o755
        };
        
        mockFs.stat.mockResolvedValueOnce(mockStats as any);
        
        const result = await getFileStats('/test/dir');
        
        expect(result.isDirectory).toBe(true);
        expect(result.isFile).toBe(false);
        expect(result.permissions).toBe('755');
      });
    });

    describe('readFileContent', () => {
      it('reads file with default encoding', async () => {
        mockFs.readFile.mockResolvedValueOnce('file content');
        
        const result = await readFileContent('/test/file.txt');
        
        expect(result).toBe('file content');
        expect(mockFs.readFile).toHaveBeenCalledWith('/test/file.txt', 'utf-8');
      });

      it('reads file with custom encoding', async () => {
        mockFs.readFile.mockResolvedValueOnce('file content');
        
        const result = await readFileContent('/test/file.txt', 'ascii');
        
        expect(result).toBe('file content');
        expect(mockFs.readFile).toHaveBeenCalledWith('/test/file.txt', 'ascii');
      });
    });

    describe('writeFileContent', () => {
      it('writes file content', async () => {
        mockFs.writeFile.mockResolvedValueOnce(undefined);
        
        await writeFileContent('/test/file.txt', 'new content');
        
        expect(mockFs.writeFile).toHaveBeenCalledWith('/test/file.txt', 'new content', { encoding: "utf-8", flag: 'wx' });
      });
    });

  });

  describe('Search & Filtering Functions', () => {
    describe('searchFilesWithValidation', () => {
      beforeEach(() => {
        mockFs.realpath.mockImplementation(async (path: any) => path.toString());
      });


      it('excludes files matching exclude patterns', async () => {
        const mockEntries = [
          { name: 'test.txt', isDirectory: () => false },
          { name: 'test.log', isDirectory: () => false },
          { name: 'node_modules', isDirectory: () => true }
        ];
        
        mockFs.readdir.mockResolvedValueOnce(mockEntries as any);
        
        const testDir = process.platform === 'win32' ? 'C:\\allowed\\dir' : '/allowed/dir';
        const allowedDirs = process.platform === 'win32' ? ['C:\\allowed'] : ['/allowed'];
        
        // Mock realpath to return the same path for validation to pass
        mockFs.realpath.mockImplementation(async (inputPath: any) => {
          const pathStr = inputPath.toString();
          // Return the path as-is for validation
          return pathStr;
        });
        
        const result = await searchFilesWithValidation(
          testDir,
          '*test*',
          allowedDirs,
          { excludePatterns: ['*.log', 'node_modules'] }
        );
        
        const expectedResult = process.platform === 'win32' ? 'C:\\allowed\\dir\\test.txt' : '/allowed/dir/test.txt';
        expect(result).toEqual([expectedResult]);
      });

      it('handles validation errors during search', async () => {
        const mockEntries = [
          { name: 'test.txt', isDirectory: () => false },
          { name: 'invalid_file.txt', isDirectory: () => false }
        ];
        
        mockFs.readdir.mockResolvedValueOnce(mockEntries as any);
        
        // Mock validatePath to throw error for invalid_file.txt
        mockFs.realpath.mockImplementation(async (path: any) => {
          if (path.toString().includes('invalid_file.txt')) {
            throw new Error('Access denied');
          }
          return path.toString();
        });
        
        const testDir = process.platform === 'win32' ? 'C:\\allowed\\dir' : '/allowed/dir';
        const allowedDirs = process.platform === 'win32' ? ['C:\\allowed'] : ['/allowed'];
        
        const result = await searchFilesWithValidation(
          testDir,
          '*test*',
          allowedDirs,
          {}
        );
        
        // Should only return the valid file, skipping the invalid one
        const expectedResult = process.platform === 'win32' ? 'C:\\allowed\\dir\\test.txt' : '/allowed/dir/test.txt';
        expect(result).toEqual([expectedResult]);
      });

      it('handles complex exclude patterns with wildcards', async () => {
        const mockEntries = [
          { name: 'test.txt', isDirectory: () => false },
          { name: 'test.backup', isDirectory: () => false },
          { name: 'important_test.js', isDirectory: () => false }
        ];
        
        mockFs.readdir.mockResolvedValueOnce(mockEntries as any);
        
        const testDir = process.platform === 'win32' ? 'C:\\allowed\\dir' : '/allowed/dir';
        const allowedDirs = process.platform === 'win32' ? ['C:\\allowed'] : ['/allowed'];
        
        const result = await searchFilesWithValidation(
          testDir,
          '*test*',
          allowedDirs,
          { excludePatterns: ['*.backup'] }
        );
        
        const expectedResults = process.platform === 'win32' ? [
          'C:\\allowed\\dir\\test.txt',
          'C:\\allowed\\dir\\important_test.js'
        ] : [
          '/allowed/dir/test.txt',
          '/allowed/dir/important_test.js'
        ];
        expect(result).toEqual(expectedResults);
      });
    });
  });

  describe('File Editing Functions', () => {
    describe('applyFileEdits', () => {
      beforeEach(() => {
        mockFs.readFile.mockResolvedValue('line1\nline2\nline3\n');
        mockFs.writeFile.mockResolvedValue(undefined);
      });

      it('applies simple text replacement', async () => {
        const edits = [
          { oldText: 'line2', newText: 'modified line2' }
        ];
        
        mockFs.rename.mockResolvedValueOnce(undefined);
        
        const result = await applyFileEdits('/test/file.txt', edits, false);
        
        expect(result).toContain('modified line2');
        // Should write to temporary file then rename
        expect(mockFs.writeFile).toHaveBeenCalledWith(
          expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
          'line1\nmodified line2\nline3\n',
          'utf-8'
        );
        expect(mockFs.rename).toHaveBeenCalledWith(
          expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
          '/test/file.txt'
        );
      });

      it('handles dry run mode', async () => {
        const edits = [
          { oldText: 'line2', newText: 'modified line2' }
        ];
        
        const result = await applyFileEdits('/test/file.txt', edits, true);
        
        expect(result).toContain('modified line2');
        expect(mockFs.writeFile).not.toHaveBeenCalled();
      });

      it('applies multiple edits sequentially', async () => {
        const edits = [
          { oldText: 'line1', newText: 'first line' },
          { oldText: 'line3', newText: 'third line' }
        ];
        
        mockFs.rename.mockResolvedValueOnce(undefined);
        
        await applyFileEdits('/test/file.txt', edits, false);
        
        expect(mockFs.writeFile).toHaveBeenCalledWith(
          expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
          'first line\nline2\nthird line\n',
          'utf-8'
        );
        expect(mockFs.rename).toHaveBeenCalledWith(
          expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
          '/test/file.txt'
        );
      });

      it('handles whitespace-flexible matching', async () => {
        mockFs.readFile.mockResolvedValue('  line1\n    line2\n  line3\n');
        
        const edits = [
          { oldText: 'line2', newText: 'modified line2' }
        ];
        
        mockFs.rename.mockResolvedValueOnce(undefined);
        
        await applyFileEdits('/test/file.txt', edits, false);
        
        expect(mockFs.writeFile).toHaveBeenCalledWith(
          expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
          '  line1\n    modified line2\n  line3\n',
          'utf-8'
        );
        expect(mockFs.rename).toHaveBeenCalledWith(
          expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
          '/test/file.txt'
        );
      });

      it('throws error for non-matching edits', async () => {
        const edits = [
          { oldText: 'nonexistent line', newText: 'replacement' }
        ];
        
        await expect(applyFileEdits('/test/file.txt', edits, false))
          .rejects.toThrow('Could not find exact match for edit');
      });

      it('handles complex multi-line edits with indentation', async () => {
        mockFs.readFile.mockResolvedValue('function test() {\n  console.log("hello");\n  return true;\n}');
        
        const edits = [
          { 
            oldText: '  console.log("hello");\n  return true;', 
            newText: '  console.log("world");\n  console.log("test");\n  return false;' 
          }
        ];
        
        mockFs.rename.mockResolvedValueOnce(undefined);
        
        await applyFileEdits('/test/file.js', edits, false);
        
        expect(mockFs.writeFile).toHaveBeenCalledWith(
          expect.stringMatching(/\/test\/file\.js\.[a-f0-9]+\.tmp$/),
          'function test() {\n  console.log("world");\n  console.log("test");\n  return false;\n}',
          'utf-8'
        );
        expect(mockFs.rename).toHaveBeenCalledWith(
          expect.stringMatching(/\/test\/file\.js\.[a-f0-9]+\.tmp$/),
          '/test/file.js'
        );
      });

      it('handles edits with different indentation patterns', async () => {
        mockFs.readFile.mockResolvedValue('    if (condition) {\n        doSomething();\n    }');
        
        const edits = [
          { 
            oldText: 'doSomething();', 
            newText: 'doSomethingElse();\n        doAnotherThing();' 
          }
        ];
        
        mockFs.rename.mockResolvedValueOnce(undefined);
        
        await applyFileEdits('/test/file.js', edits, false);
        
        expect(mockFs.writeFile).toHaveBeenCalledWith(
          expect.stringMatching(/\/test\/file\.js\.[a-f0-9]+\.tmp$/),
          '    if (condition) {\n        doSomethingElse();\n        doAnotherThing();\n    }',
          'utf-8'
        );
        expect(mockFs.rename).toHaveBeenCalledWith(
          expect.stringMatching(/\/test\/file\.js\.[a-f0-9]+\.tmp$/),
          '/test/file.js'
        );
      });

      it('handles CRLF line endings in file content', async () => {
        mockFs.readFile.mockResolvedValue('line1\r\nline2\r\nline3\r\n');
        
        const edits = [
          { oldText: 'line2', newText: 'modified line2' }
        ];
        
        mockFs.rename.mockResolvedValueOnce(undefined);
        
        await applyFileEdits('/test/file.txt', edits, false);
        
        expect(mockFs.writeFile).toHaveBeenCalledWith(
          expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
          'line1\nmodified line2\nline3\n',
          'utf-8'
        );
        expect(mockFs.rename).toHaveBeenCalledWith(
          expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
          '/test/file.txt'
        );
      });
    });

    describe('tailFile', () => {
      it('handles empty files', async () => {
        mockFs.stat.mockResolvedValue({ size: 0 } as any);
        
        const result = await tailFile('/test/empty.txt', 5);
        
        expect(result).toBe('');
        expect(mockFs.open).not.toHaveBeenCalled();
      });

      it('calls stat to check file size', async () => {
        mockFs.stat.mockResolvedValue({ size: 100 } as any);
        
        // Mock file handle with proper typing
        const mockFileHandle = {
          read: vi.fn(),
          close: vi.fn()
        } as any;
        
        mockFileHandle.read.mockResolvedValue({ bytesRead: 0 });
        mockFileHandle.close.mockResolvedValue(undefined);
        
        mockFs.open.mockResolvedValue(mockFileHandle);
        
        await tailFile('/test/file.txt', 2);
        
        expect(mockFs.stat).toHaveBeenCalledWith('/test/file.txt');
        expect(mockFs.open).toHaveBeenCalledWith('/test/file.txt', 'r');
      });

      it('handles files with content and returns last lines', async () => {
        mockFs.stat.mockResolvedValue({ size: 50 } as any);
        
        const mockFileHandle = {
          read: vi.fn(),
          close: vi.fn()
        } as any;
        
        // Simulate reading file content in chunks
        mockFileHandle.read
          .mockResolvedValueOnce({ bytesRead: 20, buffer: Buffer.from('line3\nline4\nline5\n') })
          .mockResolvedValueOnce({ bytesRead: 0 });
        mockFileHandle.close.mockResolvedValue(undefined);
        
        mockFs.open.mockResolvedValue(mockFileHandle);
        
        const result = await tailFile('/test/file.txt', 2);
        
        expect(mockFileHandle.close).toHaveBeenCalled();
      });

      it('handles read errors gracefully', async () => {
        mockFs.stat.mockResolvedValue({ size: 100 } as any);
        
        const mockFileHandle = {
          read: vi.fn(),
          close: vi.fn()
        } as any;
        
        mockFileHandle.read.mockResolvedValue({ bytesRead: 0 });
        mockFileHandle.close.mockResolvedValue(undefined);
        
        mockFs.open.mockResolvedValue(mockFileHandle);
        
        await tailFile('/test/file.txt', 5);
        
        expect(mockFileHandle.close).toHaveBeenCalled();
      });
    });

    describe('headFile', () => {
      it('opens file for reading', async () => {
        // Mock file handle with proper typing
        const mockFileHandle = {
          read: vi.fn(),
          close: vi.fn()
        } as any;
        
        mockFileHandle.read.mockResolvedValue({ bytesRead: 0 });
        mockFileHandle.close.mockResolvedValue(undefined);
        
        mockFs.open.mockResolvedValue(mockFileHandle);
        
        await headFile('/test/file.txt', 2);
        
        expect(mockFs.open).toHaveBeenCalledWith('/test/file.txt', 'r');
      });

      it('handles files with content and returns first lines', async () => {
        const mockFileHandle = {
          read: vi.fn(),
          close: vi.fn()
        } as any;
        
        // Simulate reading file content with newlines
        mockFileHandle.read
          .mockResolvedValueOnce({ bytesRead: 20, buffer: Buffer.from('line1\nline2\nline3\n') })
          .mockResolvedValueOnce({ bytesRead: 0 });
        mockFileHandle.close.mockResolvedValue(undefined);
        
        mockFs.open.mockResolvedValue(mockFileHandle);
        
        const result = await headFile('/test/file.txt', 2);
        
        expect(mockFileHandle.close).toHaveBeenCalled();
      });

      it('handles files with leftover content', async () => {
        const mockFileHandle = {
          read: vi.fn(),
          close: vi.fn()
        } as any;
        
        // Simulate reading file content without final newline
        mockFileHandle.read
          .mockResolvedValueOnce({ bytesRead: 15, buffer: Buffer.from('line1\nline2\nend') })
          .mockResolvedValueOnce({ bytesRead: 0 });
        mockFileHandle.close.mockResolvedValue(undefined);
        
        mockFs.open.mockResolvedValue(mockFileHandle);
        
        const result = await headFile('/test/file.txt', 5);
        
        expect(mockFileHandle.close).toHaveBeenCalled();
      });

      it('handles reaching requested line count', async () => {
        const mockFileHandle = {
          read: vi.fn(),
          close: vi.fn()
        } as any;
        
        // Simulate reading exactly the requested number of lines
        mockFileHandle.read
          .mockResolvedValueOnce({ bytesRead: 12, buffer: Buffer.from('line1\nline2\n') })
          .mockResolvedValueOnce({ bytesRead: 0 });
        mockFileHandle.close.mockResolvedValue(undefined);
        
        mockFs.open.mockResolvedValue(mockFileHandle);
        
        const result = await headFile('/test/file.txt', 2);
        
        expect(mockFileHandle.close).toHaveBeenCalled();
      });
    });
  });
});

```

## /src/filesystem/__tests__/path-utils.test.ts

```ts path="/src/filesystem/__tests__/path-utils.test.ts" 
import { describe, it, expect, afterEach } from 'vitest';
import { normalizePath, expandHome, convertToWindowsPath } from '../path-utils.js';

describe('Path Utilities', () => {
  describe('convertToWindowsPath', () => {
    it('leaves Unix paths unchanged', () => {
      expect(convertToWindowsPath('/usr/local/bin'))
        .toBe('/usr/local/bin');
      expect(convertToWindowsPath('/home/user/some path'))
        .toBe('/home/user/some path');
    });

    it('never converts WSL paths (they work correctly in WSL with Node.js fs)', () => {
      // WSL paths should NEVER be converted, regardless of platform
      // They are valid Linux paths that work with Node.js fs operations inside WSL
      expect(convertToWindowsPath('/mnt/c/NS/MyKindleContent'))
        .toBe('/mnt/c/NS/MyKindleContent');
      expect(convertToWindowsPath('/mnt/d/Documents'))
        .toBe('/mnt/d/Documents');
    });

    it('converts Unix-style Windows paths only on Windows platform', () => {
      // On Windows, /c/ style paths should be converted
      if (process.platform === 'win32') {
        expect(convertToWindowsPath('/c/NS/MyKindleContent'))
          .toBe('C:\\NS\\MyKindleContent');
      } else {
        // On Linux, leave them unchanged
        expect(convertToWindowsPath('/c/NS/MyKindleContent'))
          .toBe('/c/NS/MyKindleContent');
      }
    });

    it('leaves Windows paths unchanged but ensures backslashes', () => {
      expect(convertToWindowsPath('C:\\NS\\MyKindleContent'))
        .toBe('C:\\NS\\MyKindleContent');
      expect(convertToWindowsPath('C:/NS/MyKindleContent'))
        .toBe('C:\\NS\\MyKindleContent');
    });

    it('handles Windows paths with spaces', () => {
      expect(convertToWindowsPath('C:\\Program Files\\Some App'))
        .toBe('C:\\Program Files\\Some App');
      expect(convertToWindowsPath('C:/Program Files/Some App'))
        .toBe('C:\\Program Files\\Some App');
    });

    it('handles drive letter paths based on platform', () => {
      // WSL paths should never be converted
      expect(convertToWindowsPath('/mnt/d/some/path'))
        .toBe('/mnt/d/some/path');

      if (process.platform === 'win32') {
        // On Windows, Unix-style paths like /d/ should be converted
        expect(convertToWindowsPath('/d/some/path'))
          .toBe('D:\\some\\path');
      } else {
        // On Linux, /d/ is just a regular Unix path
        expect(convertToWindowsPath('/d/some/path'))
          .toBe('/d/some/path');
      }
    });
  });

  describe('normalizePath', () => {
    it('preserves Unix paths', () => {
      expect(normalizePath('/usr/local/bin'))
        .toBe('/usr/local/bin');
      expect(normalizePath('/home/user/some path'))
        .toBe('/home/user/some path');
      expect(normalizePath('"/usr/local/some app/"'))
        .toBe('/usr/local/some app');
    });

    it('removes surrounding quotes', () => {
      expect(normalizePath('"C:\\NS\\My Kindle Content"'))
        .toBe('C:\\NS\\My Kindle Content');
    });

    it('normalizes backslashes', () => {
      expect(normalizePath('C:\\\\NS\\\\MyKindleContent'))
        .toBe('C:\\NS\\MyKindleContent');
    });

    it('converts forward slashes to backslashes on Windows', () => {
      expect(normalizePath('C:/NS/MyKindleContent'))
        .toBe('C:\\NS\\MyKindleContent');
    });

    it('always preserves WSL paths (they work correctly in WSL)', () => {
      // WSL paths should ALWAYS be preserved, regardless of platform
      // This is the fix for issue #2795
      expect(normalizePath('/mnt/c/NS/MyKindleContent'))
        .toBe('/mnt/c/NS/MyKindleContent');
      expect(normalizePath('/mnt/d/Documents'))
        .toBe('/mnt/d/Documents');
    });

    it('handles Unix-style Windows paths', () => {
      // On Windows, /c/ paths should be converted
      if (process.platform === 'win32') {
        expect(normalizePath('/c/NS/MyKindleContent'))
          .toBe('C:\\NS\\MyKindleContent');
      } else if (process.platform === 'linux') {
        // On Linux, /c/ is just a regular Unix path
        expect(normalizePath('/c/NS/MyKindleContent'))
          .toBe('/c/NS/MyKindleContent');
      }
    });

    it('handles paths with spaces and mixed slashes', () => {
      expect(normalizePath('C:/NS/My Kindle Content'))
        .toBe('C:\\NS\\My Kindle Content');
      // WSL paths should always be preserved
      expect(normalizePath('/mnt/c/NS/My Kindle Content'))
        .toBe('/mnt/c/NS/My Kindle Content');
      expect(normalizePath('C:\\Program Files (x86)\\App Name'))
        .toBe('C:\\Program Files (x86)\\App Name');
      expect(normalizePath('"C:\\Program Files\\App Name"'))
        .toBe('C:\\Program Files\\App Name');
      expect(normalizePath('  C:\\Program Files\\App Name  '))
        .toBe('C:\\Program Files\\App Name');
    });

    it('preserves spaces in all path formats', () => {
      // WSL paths should always be preserved
      expect(normalizePath('/mnt/c/Program Files/App Name'))
        .toBe('/mnt/c/Program Files/App Name');

      if (process.platform === 'win32') {
        // On Windows, Unix-style paths like /c/ should be converted
        expect(normalizePath('/c/Program Files/App Name'))
          .toBe('C:\\Program Files\\App Name');
      } else {
        // On Linux, /c/ is just a regular Unix path
        expect(normalizePath('/c/Program Files/App Name'))
          .toBe('/c/Program Files/App Name');
      }
      expect(normalizePath('C:/Program Files/App Name'))
        .toBe('C:\\Program Files\\App Name');
    });

    it('handles special characters in paths', () => {
      // Test ampersand in path
      expect(normalizePath('C:\\NS\\Sub&Folder'))
        .toBe('C:\\NS\\Sub&Folder');
      expect(normalizePath('C:/NS/Sub&Folder'))
        .toBe('C:\\NS\\Sub&Folder');
      // WSL paths should always be preserved
      expect(normalizePath('/mnt/c/NS/Sub&Folder'))
        .toBe('/mnt/c/NS/Sub&Folder');

      // Test tilde in path (short names in Windows)
      expect(normalizePath('C:\\NS\\MYKIND~1'))
        .toBe('C:\\NS\\MYKIND~1');
      expect(normalizePath('/Users/NEMANS~1/FOLDER~2/SUBFO~1/Public/P12PST~1'))
        .toBe('/Users/NEMANS~1/FOLDER~2/SUBFO~1/Public/P12PST~1');

      // Test other special characters
      expect(normalizePath('C:\\Path with #hash'))
        .toBe('C:\\Path with #hash');
      expect(normalizePath('C:\\Path with (parentheses)'))
        .toBe('C:\\Path with (parentheses)');
      expect(normalizePath('C:\\Path with [brackets]'))
        .toBe('C:\\Path with [brackets]');
      expect(normalizePath('C:\\Path with @at+plus$dollar%percent'))
        .toBe('C:\\Path with @at+plus$dollar%percent');
    });

    it('capitalizes lowercase drive letters for Windows paths', () => {
      expect(normalizePath('c:/windows/system32'))
        .toBe('C:\\windows\\system32');
      // WSL paths should always be preserved
      expect(normalizePath('/mnt/d/my/folder'))
        .toBe('/mnt/d/my/folder');

      if (process.platform === 'win32') {
        // On Windows, Unix-style paths should be converted and capitalized
        expect(normalizePath('/e/another/folder'))
          .toBe('E:\\another\\folder');
      } else {
        // On Linux, /e/ is just a regular Unix path
        expect(normalizePath('/e/another/folder'))
          .toBe('/e/another/folder');
      }
    });

    it('handles UNC paths correctly', () => {
      // UNC paths should preserve the leading double backslash
      const uncPath = '\\\\SERVER\\share\\folder';
      expect(normalizePath(uncPath)).toBe('\\\\SERVER\\share\\folder');
      
      // Test UNC path with double backslashes that need normalization
      const uncPathWithDoubles = '\\\\\\\\SERVER\\\\share\\\\folder';
      expect(normalizePath(uncPathWithDoubles)).toBe('\\\\SERVER\\share\\folder');
    });

    it('returns normalized non-Windows/WSL/Unix-style Windows paths as is after basic normalization', () => {
      // A path that looks somewhat absolute but isn't a drive or recognized Unix root for Windows conversion
      // These paths should be preserved as-is (not converted to Windows C:\ format or WSL format)
      const otherAbsolutePath = '\\someserver\\share\\file';
      expect(normalizePath(otherAbsolutePath)).toBe(otherAbsolutePath);
    });
  });

  describe('expandHome', () => {
    it('expands ~ to home directory', () => {
      const result = expandHome('~/test');
      expect(result).toContain('test');
      expect(result).not.toContain('~');
    });

    it('expands bare ~ to home directory', () => {
      const result = expandHome('~');
      expect(result).not.toContain('~');
      expect(result.length).toBeGreaterThan(0);
    });

    it('leaves other paths unchanged', () => {
      expect(expandHome('C:/test')).toBe('C:/test');
    });
  });

  describe('WSL path handling (issue #2795 fix)', () => {
    // Save original platform
    const originalPlatform = process.platform;

    afterEach(() => {
      // Restore platform after each test
      Object.defineProperty(process, 'platform', {
        value: originalPlatform,
        writable: true,
        configurable: true
      });
    });

    it('should NEVER convert WSL paths - they work correctly in WSL with Node.js fs', () => {
      // The key insight: When running `wsl npx ...`, Node.js runs INSIDE WSL (process.platform === 'linux')
      // and /mnt/c/ paths work correctly with Node.js fs operations in that environment.
      // Converting them to C:\ format breaks fs operations because Windows paths don't work inside WSL.

      // Mock Linux platform (inside WSL)
      Object.defineProperty(process, 'platform', {
        value: 'linux',
        writable: true,
        configurable: true
      });

      // WSL paths should NOT be converted, even inside WSL
      expect(normalizePath('/mnt/c/Users/username/folder'))
        .toBe('/mnt/c/Users/username/folder');

      expect(normalizePath('/mnt/d/Documents/project'))
        .toBe('/mnt/d/Documents/project');
    });

    it('should also preserve WSL paths when running on Windows', () => {
      // Mock Windows platform
      Object.defineProperty(process, 'platform', {
        value: 'win32',
        writable: true,
        configurable: true
      });

      // WSL paths should still be preserved (though they wouldn't be accessible from Windows Node.js)
      expect(normalizePath('/mnt/c/Users/username/folder'))
        .toBe('/mnt/c/Users/username/folder');

      expect(normalizePath('/mnt/d/Documents/project'))
        .toBe('/mnt/d/Documents/project');
    });

    it('should convert Unix-style Windows paths (/c/) only when running on Windows (win32)', () => {
      // Mock process.platform to be 'win32' (Windows)
      Object.defineProperty(process, 'platform', {
        value: 'win32',
        writable: true,
        configurable: true
      });

      // Unix-style Windows paths like /c/ should be converted on Windows
      expect(normalizePath('/c/Users/username/folder'))
        .toBe('C:\\Users\\username\\folder');

      expect(normalizePath('/d/Documents/project'))
        .toBe('D:\\Documents\\project');
    });

    it('should NOT convert Unix-style paths (/c/) when running inside WSL (linux)', () => {
      // Mock process.platform to be 'linux' (WSL/Linux)
      Object.defineProperty(process, 'platform', {
        value: 'linux',
        writable: true,
        configurable: true
      });

      // When on Linux, /c/ is just a regular Unix directory, not a drive letter
      expect(normalizePath('/c/some/path'))
        .toBe('/c/some/path');

      expect(normalizePath('/d/another/path'))
        .toBe('/d/another/path');
    });

    it('should preserve regular Unix paths on all platforms', () => {
      // Test on Linux
      Object.defineProperty(process, 'platform', {
        value: 'linux',
        writable: true,
        configurable: true
      });

      expect(normalizePath('/home/user/documents'))
        .toBe('/home/user/documents');

      expect(normalizePath('/var/log/app'))
        .toBe('/var/log/app');

      // Test on Windows (though these paths wouldn't work on Windows)
      Object.defineProperty(process, 'platform', {
        value: 'win32',
        writable: true,
        configurable: true
      });

      expect(normalizePath('/home/user/documents'))
        .toBe('/home/user/documents');

      expect(normalizePath('/var/log/app'))
        .toBe('/var/log/app');
    });

    it('reproduces exact scenario from issue #2795', () => {
      // Simulate running inside WSL: wsl npx @modelcontextprotocol/server-filesystem /mnt/c/Users/username/folder
      Object.defineProperty(process, 'platform', {
        value: 'linux',
        writable: true,
        configurable: true
      });

      // This is the exact path from the issue
      const inputPath = '/mnt/c/Users/username/folder';
      const result = normalizePath(inputPath);

      // Should NOT convert to C:\Users\username\folder
      expect(result).toBe('/mnt/c/Users/username/folder');
      expect(result).not.toContain('C:');
      expect(result).not.toContain('\\');
    });

    it('should handle relative path slash conversion based on platform', () => {
      // This test verifies platform-specific behavior naturally without mocking
      // On Windows: forward slashes converted to backslashes
      // On Linux/Unix: forward slashes preserved
      const relativePath = 'some/relative/path';
      const result = normalizePath(relativePath);

      if (originalPlatform === 'win32') {
        expect(result).toBe('some\\relative\\path');
      } else {
        expect(result).toBe('some/relative/path');
      }
    });
  });
});

```
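
Read together, these cases pin down a small decision table: `/mnt/<drive>/…` WSL paths are always preserved, `/<letter>/…` paths are rewritten to a drive path only when `process.platform === 'win32'`, and native Windows paths merely have their slashes normalized. A condensed sketch of that contract (illustrative only; the shipped logic lives in `path-utils.ts`):

```ts
// Illustrative reduction of the behavior the tests above encode.
function convertToWindowsPathSketch(p: string): string {
  // WSL mounts are valid Linux paths inside WSL; never rewrite them.
  if (p.startsWith('/mnt/')) return p;

  // Unix-style drive paths (/c/...) become C:\... only on Windows itself.
  const drive = /^\/([a-zA-Z])\/(.*)$/.exec(p);
  if (drive && process.platform === 'win32') {
    return `${drive[1].toUpperCase()}:\\${drive[2].replace(/\//g, '\\')}`;
  }

  // Native Windows drive paths: normalize forward slashes to backslashes.
  if (/^[a-zA-Z]:[\\/]/.test(p)) return p.replace(/\//g, '\\');

  return p; // plain Unix paths pass through untouched
}
```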

## /src/filesystem/__tests__/path-validation.test.ts

```ts path="/src/filesystem/__tests__/path-validation.test.ts" 
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import * as path from 'path';
import * as fs from 'fs/promises';
import * as os from 'os';
import { isPathWithinAllowedDirectories } from '../path-validation.js';

/**
 * Check if the current environment supports symlink creation
 */
async function checkSymlinkSupport(): Promise<boolean> {
  const testDir = await fs.mkdtemp(path.join(os.tmpdir(), 'symlink-test-'));
  try {
    const targetFile = path.join(testDir, 'target.txt');
    const linkFile = path.join(testDir, 'link.txt');
    
    await fs.writeFile(targetFile, 'test');
    await fs.symlink(targetFile, linkFile);
    
    // If we get here, symlinks are supported
    return true;
  } catch (error) {
    // EPERM indicates no symlink permissions
    if ((error as NodeJS.ErrnoException).code === 'EPERM') {
      return false;
    }
    // Other errors might indicate a real problem
    throw error;
  } finally {
    await fs.rm(testDir, { recursive: true, force: true });
  }
}

// Global variable to store symlink support status
let symlinkSupported: boolean | null = null;

/**
 * Get cached symlink support status, checking once per test run
 */
async function getSymlinkSupport(): Promise<boolean> {
  if (symlinkSupported === null) {
    symlinkSupported = await checkSymlinkSupport();
    if (!symlinkSupported) {
      console.log('\n⚠️  Symlink tests will be skipped - symlink creation not supported in this environment');
      console.log('   On Windows, enable Developer Mode or run as Administrator to enable symlink tests');
    }
  }
  return symlinkSupported;
}

describe('Path Validation', () => {
  it('allows exact directory match', () => {
    const allowed = ['/home/user/project'];
    expect(isPathWithinAllowedDirectories('/home/user/project', allowed)).toBe(true);
  });

  it('allows subdirectories', () => {
    const allowed = ['/home/user/project'];
    expect(isPathWithinAllowedDirectories('/home/user/project/src', allowed)).toBe(true);
    expect(isPathWithinAllowedDirectories('/home/user/project/src/index.js', allowed)).toBe(true);
    expect(isPathWithinAllowedDirectories('/home/user/project/deeply/nested/file.txt', allowed)).toBe(true);
  });

  it('blocks similar directory names (prefix vulnerability)', () => {
    const allowed = ['/home/user/project'];
    expect(isPathWithinAllowedDirectories('/home/user/project2', allowed)).toBe(false);
    expect(isPathWithinAllowedDirectories('/home/user/project_backup', allowed)).toBe(false);
    expect(isPathWithinAllowedDirectories('/home/user/project-old', allowed)).toBe(false);
    expect(isPathWithinAllowedDirectories('/home/user/projectile', allowed)).toBe(false);
    expect(isPathWithinAllowedDirectories('/home/user/project.bak', allowed)).toBe(false);
  });

  it('blocks paths outside allowed directories', () => {
    const allowed = ['/home/user/project'];
    expect(isPathWithinAllowedDirectories('/home/user/other', allowed)).toBe(false);
    expect(isPathWithinAllowedDirectories('/etc/passwd', allowed)).toBe(false);
    expect(isPathWithinAllowedDirectories('/home/user', allowed)).toBe(false);
    expect(isPathWithinAllowedDirectories('/', allowed)).toBe(false);
  });

  it('handles multiple allowed directories', () => {
    const allowed = ['/home/user/project1', '/home/user/project2'];
    expect(isPathWithinAllowedDirectories('/home/user/project1/src', allowed)).toBe(true);
    expect(isPathWithinAllowedDirectories('/home/user/project2/src', allowed)).toBe(true);
    expect(isPathWithinAllowedDirectories('/home/user/project3', allowed)).toBe(false);
    expect(isPathWithinAllowedDirectories('/home/user/project1_backup', allowed)).toBe(false);
    expect(isPathWithinAllowedDirectories('/home/user/project2-old', allowed)).toBe(false);
  });

  it('blocks parent and sibling directories', () => {
    const allowed = ['/test/allowed'];

    // Parent directory
    expect(isPathWithinAllowedDirectories('/test', allowed)).toBe(false);
    expect(isPathWithinAllowedDirectories('/', allowed)).toBe(false);

    // Sibling with common prefix
    expect(isPathWithinAllowedDirectories('/test/allowed_sibling', allowed)).toBe(false);
    expect(isPathWithinAllowedDirectories('/test/allowed2', allowed)).toBe(false);
  });

  it('handles paths with special characters', () => {
    const allowed = ['/home/user/my-project (v2)'];

    expect(isPathWithinAllowedDirectories('/home/user/my-project (v2)', allowed)).toBe(true);
    expect(isPathWithinAllowedDirectories('/home/user/my-project (v2)/src', allowed)).toBe(true);
    expect(isPathWithinAllowedDirectories('/home/user/my-project (v2)_backup', allowed)).toBe(false);
    expect(isPathWithinAllowedDirectories('/home/user/my-project', allowed)).toBe(false);
  });

  describe('Input validation', () => {
    it('rejects empty inputs', () => {
      const allowed = ['/home/user/project'];

      expect(isPathWithinAllowedDirectories('', allowed)).toBe(false);
      expect(isPathWithinAllowedDirectories('/home/user/project', [])).toBe(false);
    });

    it('handles trailing separators correctly', () => {
      const allowed = ['/home/user/project'];

      // Path with trailing separator should still match
      expect(isPathWithinAllowedDirectories('/home/user/project/', allowed)).toBe(true);

      // Allowed directory with trailing separator
      const allowedWithSep = ['/home/user/project/'];
      expect(isPathWithinAllowedDirectories('/home/user/project', allowedWithSep)).toBe(true);
      expect(isPathWithinAllowedDirectories('/home/user/project/', allowedWithSep)).toBe(true);

      // Should still block similar names with or without trailing separators
      expect(isPathWithinAllowedDirectories('/home/user/project2', allowedWithSep)).toBe(false);
      expect(isPathWithinAllowedDirectories('/home/user/project2', allowed)).toBe(false);
      expect(isPathWithinAllowedDirectories('/home/user/project2/', allowed)).toBe(false);
    });

    it('skips empty directory entries in allowed list', () => {
      const allowed = ['', '/home/user/project', ''];
      expect(isPathWithinAllowedDirectories('/home/user/project', allowed)).toBe(true);
      expect(isPathWithinAllowedDirectories('/home/user/project/src', allowed)).toBe(true);

      // Should still validate properly with empty entries
      expect(isPathWithinAllowedDirectories('/home/user/other', allowed)).toBe(false);
    });

    it('handles Windows paths with trailing separators', () => {
      if (path.sep === '\\') {
        const allowed = ['C:\\Users\\project'];

        // Path with trailing separator
        expect(isPathWithinAllowedDirectories('C:\\Users\\project\\', allowed)).toBe(true);

        // Allowed with trailing separator
        const allowedWithSep = ['C:\\Users\\project\\'];
        expect(isPathWithinAllowedDirectories('C:\\Users\\project', allowedWithSep)).toBe(true);
        expect(isPathWithinAllowedDirectories('C:\\Users\\project\\', allowedWithSep)).toBe(true);

        // Should still block similar names
        expect(isPathWithinAllowedDirectories('C:\\Users\\project2\\', allowed)).toBe(false);
      }
    });
  });

  describe('Error handling', () => {
    it('normalizes relative paths to absolute', () => {
      const allowed = [process.cwd()];

      // Relative paths get normalized to absolute paths based on cwd
      expect(isPathWithinAllowedDirectories('relative/path', allowed)).toBe(true);
      expect(isPathWithinAllowedDirectories('./file', allowed)).toBe(true);

      // Parent directory references that escape allowed directory
      const parentAllowed = ['/home/user/project'];
      expect(isPathWithinAllowedDirectories('../parent', parentAllowed)).toBe(false);
    });

    it('normalizes relative allowed directories against cwd before matching', () => {
      const badAllowed = ['relative/path', '/some/other/absolute/path'];

      // Relative paths in allowed dirs are normalized to absolute based on cwd
      // The normalized 'relative/path' won't match our test path
      expect(isPathWithinAllowedDirectories('/some/other/absolute/path/file', badAllowed)).toBe(true);
      expect(isPathWithinAllowedDirectories('/absolute/path/file', badAllowed)).toBe(false);
    });

    it('handles null and undefined inputs gracefully', () => {
      const allowed = ['/home/user/project'];

      // Should return false, not crash
      expect(isPathWithinAllowedDirectories(null as any, allowed)).toBe(false);
      expect(isPathWithinAllowedDirectories(undefined as any, allowed)).toBe(false);
      expect(isPathWithinAllowedDirectories('/path', null as any)).toBe(false);
      expect(isPathWithinAllowedDirectories('/path', undefined as any)).toBe(false);
    });
  });

  describe('Unicode and special characters', () => {
    it('handles unicode characters in paths', () => {
      const allowed = ['/home/user/café'];

      expect(isPathWithinAllowedDirectories('/home/user/café', allowed)).toBe(true);
      expect(isPathWithinAllowedDirectories('/home/user/café/file', allowed)).toBe(true);

      // Different unicode representation won't match (not normalized)
      const decomposed = '/home/user/cafe\u0301'; // e + combining accent
      expect(isPathWithinAllowedDirectories(decomposed, allowed)).toBe(false);
    });

    it('handles paths with spaces correctly', () => {
      const allowed = ['/home/user/my project'];

      expect(isPathWithinAllowedDirectories('/home/user/my project', allowed)).toBe(true);
      expect(isPathWithinAllowedDirectories('/home/user/my project/file', allowed)).toBe(true);

      // Partial matches should fail
      expect(isPathWithinAllowedDirectories('/home/user/my', allowed)).toBe(false);
      expect(isPathWithinAllowedDirectories('/home/user/my proj', allowed)).toBe(false);
    });
  });

  describe('Overlapping allowed directories', () => {
    it('handles nested allowed directories correctly', () => {
      const allowed = ['/home', '/home/user', '/home/user/project'];

      // All paths under /home are allowed
      expect(isPathWithinAllowedDirectories('/home/anything', allowed)).toBe(true);
      expect(isPathWithinAllowedDirectories('/home/user/anything', allowed)).toBe(true);
      expect(isPathWithinAllowedDirectories('/home/user/project/anything', allowed)).toBe(true);

      // First match wins (most permissive)
      expect(isPathWithinAllowedDirectories('/home/other/deep/path', allowed)).toBe(true);
    });

    it('handles root directory as allowed', () => {
      const allowed = ['/'];

      // Everything is allowed under root (dangerous configuration)
      expect(isPathWithinAllowedDirectories('/', allowed)).toBe(true);
      expect(isPathWithinAllowedDirectories('/any/path', allowed)).toBe(true);
      expect(isPathWithinAllowedDirectories('/etc/passwd', allowed)).toBe(true);
      expect(isPathWithinAllowedDirectories('/home/user/secret', allowed)).toBe(true);

      // But only on the same filesystem root
      if (path.sep === '\\') {
        expect(isPathWithinAllowedDirectories('D:\\other', ['/'])).toBe(false);
      }
    });
  });

  describe('Cross-platform behavior', () => {
    it('handles Windows-style paths on Windows', () => {
      if (path.sep === '\\') {
        const allowed = ['C:\\Users\\project'];
        expect(isPathWithinAllowedDirectories('C:\\Users\\project', allowed)).toBe(true);
        expect(isPathWithinAllowedDirectories('C:\\Users\\project\\src', allowed)).toBe(true);
        expect(isPathWithinAllowedDirectories('C:\\Users\\project2', allowed)).toBe(false);
        expect(isPathWithinAllowedDirectories('C:\\Users\\project_backup', allowed)).toBe(false);
      }
    });

    it('handles Unix-style paths on Unix', () => {
      if (path.sep === '/') {
        const allowed = ['/home/user/project'];
        expect(isPathWithinAllowedDirectories('/home/user/project', allowed)).toBe(true);
        expect(isPathWithinAllowedDirectories('/home/user/project/src', allowed)).toBe(true);
        expect(isPathWithinAllowedDirectories('/home/user/project2', allowed)).toBe(false);
      }
    });
  });

  describe('Validation Tests - Path Traversal', () => {
    it('blocks path traversal attempts', () => {
      const allowed = ['/home/user/project'];

      // Basic traversal attempts
      expect(isPathWithinAllowedDirectories('/home/user/project/../../../etc/passwd', allowed)).toBe(false);
      expect(isPathWithinAllowedDirectories('/home/user/project/../../other', allowed)).toBe(false);
      expect(isPathWithinAllowedDirectories('/home/user/project/../project2', allowed)).toBe(false);

      // Mixed traversal with valid segments
      expect(isPathWithinAllowedDirectories('/home/user/project/src/../../project2', allowed)).toBe(false);
      expect(isPathWithinAllowedDirectories('/home/user/project/./../../other', allowed)).toBe(false);

      // Multiple traversal sequences
      expect(isPathWithinAllowedDirectories('/home/user/project/../project/../../../etc', allowed)).toBe(false);
    });

    it('blocks traversal in allowed directories', () => {
      const allowed = ['/home/user/project/../safe'];

      // The allowed directory itself should be normalized and safe
      expect(isPathWithinAllowedDirectories('/home/user/safe/file', allowed)).toBe(true);
      expect(isPathWithinAllowedDirectories('/home/user/project/file', allowed)).toBe(false);
    });

    it('handles complex traversal patterns', () => {
      const allowed = ['/home/user/project'];

      // Double dots in filenames (not traversal) - these normalize to paths within allowed dir
      expect(isPathWithinAllowedDirectories('/home/user/project/..test', allowed)).toBe(true); // Not traversal
      expect(isPathWithinAllowedDirectories('/home/user/project/test..', allowed)).toBe(true); // Not traversal
      expect(isPathWithinAllowedDirectories('/home/user/project/te..st', allowed)).toBe(true); // Not traversal

      // Actual traversal
      expect(isPathWithinAllowedDirectories('/home/user/project/../test', allowed)).toBe(false); // Is traversal - goes to /home/user/test

      // Edge case: /home/user/project/.. normalizes to /home/user (parent dir)
      expect(isPathWithinAllowedDirectories('/home/user/project/..', allowed)).toBe(false); // Goes to parent
    });
  });

  describe('Validation Tests - Null Bytes', () => {
    it('rejects paths with null bytes', () => {
      const allowed = ['/home/user/project'];

      expect(isPathWithinAllowedDirectories('/home/user/project\x00/etc/passwd', allowed)).toBe(false);
      expect(isPathWithinAllowedDirectories('/home/user/project/test\x00.txt', allowed)).toBe(false);
      expect(isPathWithinAllowedDirectories('\x00/home/user/project', allowed)).toBe(false);
      expect(isPathWithinAllowedDirectories('/home/user/project/\x00', allowed)).toBe(false);
    });

    it('rejects allowed directories with null bytes', () => {
      const allowed = ['/home/user/project\x00'];

      expect(isPathWithinAllowedDirectories('/home/user/project', allowed)).toBe(false);
      expect(isPathWithinAllowedDirectories('/home/user/project/file', allowed)).toBe(false);
    });
  });

  describe('Validation Tests - Special Characters', () => {
    it('allows percent signs in filenames', () => {
      const allowed = ['/home/user/project'];

      // Percent is a valid filename character
      expect(isPathWithinAllowedDirectories('/home/user/project/report_50%.pdf', allowed)).toBe(true);
      expect(isPathWithinAllowedDirectories('/home/user/project/Q1_25%_growth', allowed)).toBe(true);
      expect(isPathWithinAllowedDirectories('/home/user/project/%41', allowed)).toBe(true); // File named %41

      // URL encoding is NOT decoded by path.normalize, so these are just odd filenames
      expect(isPathWithinAllowedDirectories('/home/user/project/%2e%2e', allowed)).toBe(true); // File named "%2e%2e"
      expect(isPathWithinAllowedDirectories('/home/user/project/file%20name', allowed)).toBe(true); // File with %20 in name
    });

    it('handles percent signs in allowed directories', () => {
      const allowed = ['/home/user/project%20files'];

      // This is a directory literally named "project%20files"
      expect(isPathWithinAllowedDirectories('/home/user/project%20files/test', allowed)).toBe(true);
      expect(isPathWithinAllowedDirectories('/home/user/project files/test', allowed)).toBe(false); // Different dir
    });
  });

  describe('Path Normalization', () => {
    it('normalizes paths before comparison', () => {
      const allowed = ['/home/user/project'];

      // Trailing slashes
      expect(isPathWithinAllowedDirectories('/home/user/project/', allowed)).toBe(true);
      expect(isPathWithinAllowedDirectories('/home/user/project//', allowed)).toBe(true);
      expect(isPathWithinAllowedDirectories('/home/user/project///', allowed)).toBe(true);

      // Current directory references
      expect(isPathWithinAllowedDirectories('/home/user/project/./src', allowed)).toBe(true);
      expect(isPathWithinAllowedDirectories('/home/user/./project/src', allowed)).toBe(true);

      // Multiple slashes
      expect(isPathWithinAllowedDirectories('/home/user/project//src//file', allowed)).toBe(true);
      expect(isPathWithinAllowedDirectories('/home//user//project//src', allowed)).toBe(true);

      // Should still block outside paths
      expect(isPathWithinAllowedDirectories('/home/user//project2', allowed)).toBe(false);
    });

    it('handles mixed separators correctly', () => {
      if (path.sep === '\\') {
        const allowed = ['C:\\Users\\project'];

        // Mixed separators should be normalized
        expect(isPathWithinAllowedDirectories('C:/Users/project', allowed)).toBe(true);
        expect(isPathWithinAllowedDirectories('C:\\Users/project\\src', allowed)).toBe(true);
        expect(isPathWithinAllowedDirectories('C:/Users\\project/src', allowed)).toBe(true);
      }
    });
  });

  describe('Edge Cases', () => {
    it('rejects non-string inputs safely', () => {
      const allowed = ['/home/user/project'];

      expect(isPathWithinAllowedDirectories(123 as any, allowed)).toBe(false);
      expect(isPathWithinAllowedDirectories({} as any, allowed)).toBe(false);
      expect(isPathWithinAllowedDirectories([] as any, allowed)).toBe(false);
      expect(isPathWithinAllowedDirectories(null as any, allowed)).toBe(false);
      expect(isPathWithinAllowedDirectories(undefined as any, allowed)).toBe(false);

      // Non-string in allowed directories
      expect(isPathWithinAllowedDirectories('/home/user/project', [123 as any])).toBe(false);
      expect(isPathWithinAllowedDirectories('/home/user/project', [{} as any])).toBe(false);
    });

    it('handles very long paths', () => {
      const allowed = ['/home/user/project'];

      // Create a very long path that's still valid
      const longSubPath = 'a/'.repeat(1000) + 'file.txt';
      expect(isPathWithinAllowedDirectories(`/home/user/project/${longSubPath}`, allowed)).toBe(true);

      // Very long path that escapes
      const escapePath = 'a/'.repeat(1000) + '../'.repeat(1001) + 'etc/passwd';
      expect(isPathWithinAllowedDirectories(`/home/user/project/${escapePath}`, allowed)).toBe(false);
    });
  });

  describe('Additional Coverage', () => {
    it('handles allowed directories with traversal that normalizes safely', () => {
      // These allowed dirs contain traversal but normalize to valid paths
      const allowed = ['/home/user/../user/project'];

      // Should normalize to /home/user/project and work correctly
      expect(isPathWithinAllowedDirectories('/home/user/project/file', allowed)).toBe(true);
      expect(isPathWithinAllowedDirectories('/home/user/other', allowed)).toBe(false);
    });

    it('handles symbolic dots in filenames', () => {
      const allowed = ['/home/user/project'];

      // Single and double dots as actual filenames (not traversal)
      expect(isPathWithinAllowedDirectories('/home/user/project/.', allowed)).toBe(true);
      expect(isPathWithinAllowedDirectories('/home/user/project/..', allowed)).toBe(false); // This normalizes to parent
      expect(isPathWithinAllowedDirectories('/home/user/project/...', allowed)).toBe(true); // Three dots is a valid filename
      expect(isPathWithinAllowedDirectories('/home/user/project/....', allowed)).toBe(true); // Four dots is a valid filename
    });

    it('handles UNC paths on Windows', () => {
      if (path.sep === '\\') {
        const allowed = ['\\\\server\\share\\project'];

        expect(isPathWithinAllowedDirectories('\\\\server\\share\\project', allowed)).toBe(true);
        expect(isPathWithinAllowedDirectories('\\\\server\\share\\project\\file', allowed)).toBe(true);
        expect(isPathWithinAllowedDirectories('\\\\server\\share\\other', allowed)).toBe(false);
        expect(isPathWithinAllowedDirectories('\\\\other\\share\\project', allowed)).toBe(false);
      }
    });
  });

  describe('Symlink Tests', () => {
    let testDir: string;
    let allowedDir: string;
    let forbiddenDir: string;

    beforeEach(async () => {
      testDir = await fs.mkdtemp(path.join(os.tmpdir(), 'fs-error-test-'));
      allowedDir = path.join(testDir, 'allowed');
      forbiddenDir = path.join(testDir, 'forbidden');

      await fs.mkdir(allowedDir, { recursive: true });
      await fs.mkdir(forbiddenDir, { recursive: true });
    });

    afterEach(async () => {
      await fs.rm(testDir, { recursive: true, force: true });
    });

    it('validates symlink handling', async () => {
      // Test with symlinks
      try {
        const linkPath = path.join(allowedDir, 'bad-link');
        const targetPath = path.join(forbiddenDir, 'target.txt');

        await fs.writeFile(targetPath, 'content');
        await fs.symlink(targetPath, linkPath);

        // In real implementation, this would throw with the resolved path
        const realPath = await fs.realpath(linkPath);
        const allowed = [allowedDir];

        // Symlink target should be outside allowed directory
        expect(isPathWithinAllowedDirectories(realPath, allowed)).toBe(false);
      } catch (error) {
        // Skip if no symlink permissions; rethrow anything else (including
        // assertion failures) instead of silently swallowing it
        if ((error as NodeJS.ErrnoException).code !== 'EPERM') {
          throw error;
        }
      }
    });

    it('handles non-existent paths correctly', async () => {
      const newFilePath = path.join(allowedDir, 'subdir', 'newfile.txt');

      // Parent directory doesn't exist
      try {
        await fs.access(newFilePath);
      } catch (error) {
        expect((error as NodeJS.ErrnoException).code).toBe('ENOENT');
      }

      // After creating parent, validation should work
      await fs.mkdir(path.dirname(newFilePath), { recursive: true });
      const allowed = [allowedDir];
      expect(isPathWithinAllowedDirectories(newFilePath, allowed)).toBe(true);
    });

    // Test path resolution consistency for symlinked files
    it('validates symlinked files consistently between path and resolved forms', async () => {
      try {
        // Setup: Create target file in forbidden area
        const targetFile = path.join(forbiddenDir, 'target.txt');
        await fs.writeFile(targetFile, 'TARGET_CONTENT');

        // Create symlink inside allowed directory pointing to forbidden file
        const symlinkPath = path.join(allowedDir, 'link-to-target.txt');
        await fs.symlink(targetFile, symlinkPath);

        // The symlink path itself passes validation (looks like it's in allowed dir)
        expect(isPathWithinAllowedDirectories(symlinkPath, [allowedDir])).toBe(true);

        // But the resolved path should fail validation
        const resolvedPath = await fs.realpath(symlinkPath);
        expect(isPathWithinAllowedDirectories(resolvedPath, [allowedDir])).toBe(false);

        // Verify the resolved path goes to the forbidden location (normalize both paths for macOS temp dirs)
        expect(await fs.realpath(resolvedPath)).toBe(await fs.realpath(targetFile));
      } catch (error) {
        // Skip if no symlink permissions on the system
        if ((error as NodeJS.ErrnoException).code !== 'EPERM') {
          throw error;
        }
      }
    });

    // Test allowed directory resolution behavior
    it('validates paths correctly when allowed directory is resolved from symlink', async () => {
      try {
        // Setup: Create the actual target directory with content
        const actualTargetDir = path.join(testDir, 'actual-target');
        await fs.mkdir(actualTargetDir, { recursive: true });
        const targetFile = path.join(actualTargetDir, 'file.txt');
        await fs.writeFile(targetFile, 'FILE_CONTENT');

        // Setup: Create symlink directory that points to target
        const symlinkDir = path.join(testDir, 'symlink-dir');
        await fs.symlink(actualTargetDir, symlinkDir);

        // Simulate resolved allowed directory (what the server startup should do)
        const resolvedAllowedDir = await fs.realpath(symlinkDir);
        const resolvedTargetDir = await fs.realpath(actualTargetDir);
        expect(resolvedAllowedDir).toBe(resolvedTargetDir);

        // Test 1: File access through original symlink path should pass validation with resolved allowed dir
        const fileViaSymlink = path.join(symlinkDir, 'file.txt');
        const resolvedFile = await fs.realpath(fileViaSymlink);
        expect(isPathWithinAllowedDirectories(resolvedFile, [resolvedAllowedDir])).toBe(true);

        // Test 2: File access through resolved path should also pass validation
        const fileViaResolved = path.join(resolvedTargetDir, 'file.txt');
        expect(isPathWithinAllowedDirectories(fileViaResolved, [resolvedAllowedDir])).toBe(true);

        // Test 3: Demonstrate inconsistent behavior with unresolved allowed directories
        // If allowed dirs were not resolved (storing symlink paths instead):
        const unresolvedAllowedDirs = [symlinkDir];
        // This validation would incorrectly fail for the same content:
        expect(isPathWithinAllowedDirectories(resolvedFile, unresolvedAllowedDirs)).toBe(false);

      } catch (error) {
        // Skip if no symlink permissions on the system
        if ((error as NodeJS.ErrnoException).code !== 'EPERM') {
          throw error;
        }
      }
    });

    it('resolves nested symlink chains completely', async () => {
      try {
        // Setup: Create target file in forbidden area
        const actualTarget = path.join(forbiddenDir, 'target-file.txt');
        await fs.writeFile(actualTarget, 'FINAL_CONTENT');

        // Create chain of symlinks: allowedFile -> link2 -> link1 -> actualTarget
        const link1 = path.join(testDir, 'intermediate-link1');
        const link2 = path.join(testDir, 'intermediate-link2');
        const allowedFile = path.join(allowedDir, 'seemingly-safe-file');

        await fs.symlink(actualTarget, link1);
        await fs.symlink(link1, link2);
        await fs.symlink(link2, allowedFile);

        // The allowed file path passes basic validation
        expect(isPathWithinAllowedDirectories(allowedFile, [allowedDir])).toBe(true);

        // But complete resolution reveals the forbidden target
        const fullyResolvedPath = await fs.realpath(allowedFile);
        expect(isPathWithinAllowedDirectories(fullyResolvedPath, [allowedDir])).toBe(false);
        expect(await fs.realpath(fullyResolvedPath)).toBe(await fs.realpath(actualTarget));

      } catch (error) {
        // Skip if no symlink permissions on the system
        if ((error as NodeJS.ErrnoException).code !== 'EPERM') {
          throw error;
        }
      }
    });
  });

  describe('Path Validation Race Condition Tests', () => {
    let testDir: string;
    let allowedDir: string;
    let forbiddenDir: string;
    let targetFile: string;
    let testPath: string;

    beforeEach(async () => {
      testDir = await fs.mkdtemp(path.join(os.tmpdir(), 'race-test-'));
      allowedDir = path.join(testDir, 'allowed');
      forbiddenDir = path.join(testDir, 'outside');
      targetFile = path.join(forbiddenDir, 'target.txt');
      testPath = path.join(allowedDir, 'test.txt');

      await fs.mkdir(allowedDir, { recursive: true });
      await fs.mkdir(forbiddenDir, { recursive: true });
      await fs.writeFile(targetFile, 'ORIGINAL CONTENT', 'utf-8');
    });

    afterEach(async () => {
      await fs.rm(testDir, { recursive: true, force: true });
    });

    it('validates non-existent file paths based on parent directory', async () => {
      const allowed = [allowedDir];

      expect(isPathWithinAllowedDirectories(testPath, allowed)).toBe(true);
      await expect(fs.access(testPath)).rejects.toThrow();

      const parentDir = path.dirname(testPath);
      expect(isPathWithinAllowedDirectories(parentDir, allowed)).toBe(true);
    });

    it('demonstrates symlink race condition allows writing outside allowed directories', async () => {
      const symlinkSupported = await getSymlinkSupport();
      if (!symlinkSupported) {
        console.log('   ⏭️  Skipping symlink race condition test - symlinks not supported');
        return;
      }

      const allowed = [allowedDir];

      await expect(fs.access(testPath)).rejects.toThrow();
      expect(isPathWithinAllowedDirectories(testPath, allowed)).toBe(true);

      await fs.symlink(targetFile, testPath);
      await fs.writeFile(testPath, 'MODIFIED CONTENT', 'utf-8');

      const targetContent = await fs.readFile(targetFile, 'utf-8');
      expect(targetContent).toBe('MODIFIED CONTENT');

      const resolvedPath = await fs.realpath(testPath);
      expect(isPathWithinAllowedDirectories(resolvedPath, allowed)).toBe(false);
    });

    it('shows timing differences between validation approaches', async () => {
      const symlinkSupported = await getSymlinkSupport();
      if (!symlinkSupported) {
        console.log('   ⏭️  Skipping timing validation test - symlinks not supported');
        return;
      }

      const allowed = [allowedDir];

      const validation1 = isPathWithinAllowedDirectories(testPath, allowed);
      expect(validation1).toBe(true);

      await fs.symlink(targetFile, testPath);

      const resolvedPath = await fs.realpath(testPath);
      const validation2 = isPathWithinAllowedDirectories(resolvedPath, allowed);
      expect(validation2).toBe(false);

      expect(validation1).not.toBe(validation2);
    });

    it('validates directory creation timing', async () => {
      const symlinkSupported = await getSymlinkSupport();
      if (!symlinkSupported) {
        console.log('   ⏭️  Skipping directory creation timing test - symlinks not supported');
        return;
      }

      const allowed = [allowedDir];
      const newDir = path.join(allowedDir, 'newdir'); // renamed to avoid shadowing the suite-level testDir

      expect(isPathWithinAllowedDirectories(newDir, allowed)).toBe(true);

      await fs.symlink(forbiddenDir, newDir);

      expect(isPathWithinAllowedDirectories(newDir, allowed)).toBe(true);

      const resolved = await fs.realpath(newDir);
      expect(isPathWithinAllowedDirectories(resolved, allowed)).toBe(false);
    });

    it('demonstrates exclusive file creation behavior', async () => {
      const symlinkSupported = await getSymlinkSupport();
      if (!symlinkSupported) {
        console.log('   ⏭️  Skipping exclusive file creation test - symlinks not supported');
        return;
      }

      const allowed = [allowedDir];

      await fs.symlink(targetFile, testPath);

      await expect(fs.open(testPath, 'wx')).rejects.toThrow(/EEXIST/);

      await fs.writeFile(testPath, 'NEW CONTENT', 'utf-8');
      const targetContent = await fs.readFile(targetFile, 'utf-8');
      expect(targetContent).toBe('NEW CONTENT');
    });

    it('should use resolved parent paths for non-existent files', async () => {
      const symlinkSupported = await getSymlinkSupport();
      if (!symlinkSupported) {
        console.log('   ⏭️  Skipping resolved parent paths test - symlinks not supported');
        return;
      }

      const allowed = [allowedDir];

      const symlinkDir = path.join(allowedDir, 'link');
      await fs.symlink(forbiddenDir, symlinkDir);

      const fileThroughSymlink = path.join(symlinkDir, 'newfile.txt');

      expect(fileThroughSymlink.startsWith(allowedDir)).toBe(true);

      const parentDir = path.dirname(fileThroughSymlink);
      const resolvedParent = await fs.realpath(parentDir);
      expect(isPathWithinAllowedDirectories(resolvedParent, allowed)).toBe(false);

      const expectedSafePath = path.join(resolvedParent, path.basename(fileThroughSymlink));
      expect(isPathWithinAllowedDirectories(expectedSafePath, allowed)).toBe(false);
    });

    it('demonstrates parent directory symlink traversal', async () => {
      const symlinkSupported = await getSymlinkSupport();
      if (!symlinkSupported) {
        console.log('   ⏭️  Skipping parent directory symlink traversal test - symlinks not supported');
        return;
      }

      const allowed = [allowedDir];
      const deepPath = path.join(allowedDir, 'sub1', 'sub2', 'file.txt');

      expect(isPathWithinAllowedDirectories(deepPath, allowed)).toBe(true);

      const sub1Path = path.join(allowedDir, 'sub1');
      await fs.symlink(forbiddenDir, sub1Path);

      await fs.mkdir(path.join(sub1Path, 'sub2'), { recursive: true });
      await fs.writeFile(deepPath, 'CONTENT', 'utf-8');

      const realPath = await fs.realpath(deepPath);
      const realAllowedDir = await fs.realpath(allowedDir);
      const realForbiddenDir = await fs.realpath(forbiddenDir);

      expect(realPath.startsWith(realAllowedDir)).toBe(false);
      expect(realPath.startsWith(realForbiddenDir)).toBe(true);
    });

    it('should prevent race condition between validatePath and file operation', async () => {
      const symlinkSupported = await getSymlinkSupport();
      if (!symlinkSupported) {
        console.log('   ⏭️  Skipping race condition prevention test - symlinks not supported');
        return;
      }

      const allowed = [allowedDir];
      const racePath = path.join(allowedDir, 'race-file.txt');
      // The suite-level targetFile (forbidden/target.txt) is already created
      // with ORIGINAL CONTENT in beforeEach, so no local re-setup is needed.

      // Path validation would pass (file doesn't exist, parent is in allowed dir)
      expect(await fs.access(racePath).then(() => false).catch(() => true)).toBe(true);
      expect(isPathWithinAllowedDirectories(racePath, allowed)).toBe(true);

      // Race condition: symlink created after validation but before write
      await fs.symlink(targetFile, racePath);

      // With exclusive write flag, write should fail on symlink
      await expect(
        fs.writeFile(racePath, 'NEW CONTENT', { encoding: 'utf-8', flag: 'wx' })
      ).rejects.toThrow(/EEXIST/);

      // Verify content unchanged
      const targetContent = await fs.readFile(targetFile, 'utf-8');
      expect(targetContent).toBe('ORIGINAL CONTENT');

      // The symlink exists but write was blocked
      const actualWritePath = await fs.realpath(racePath);
      expect(actualWritePath).toBe(await fs.realpath(targetFile));
      expect(isPathWithinAllowedDirectories(actualWritePath, allowed)).toBe(false);
    });

    it('should allow overwrites to legitimate files within allowed directories', async () => {
      const allowed = [allowedDir];
      const legitFile = path.join(allowedDir, 'legit-file.txt');

      // Create a legitimate file
      await fs.writeFile(legitFile, 'ORIGINAL', 'utf-8');

      // Opening with w should work for legitimate files
      const fd = await fs.open(legitFile, 'w');
      try {
        await fd.write('UPDATED', 0, 'utf-8');
      } finally {
        await fd.close();
      }

      const content = await fs.readFile(legitFile, 'utf-8');
      expect(content).toBe('UPDATED');
    });

    it('should handle symlinks that point within allowed directories', async () => {
      const symlinkSupported = await getSymlinkSupport();
      if (!symlinkSupported) {
        console.log('   ⏭️  Skipping symlinks within allowed directories test - symlinks not supported');
        return;
      }

      const allowed = [allowedDir];
      const targetFile = path.join(allowedDir, 'target.txt');
      const symlinkPath = path.join(allowedDir, 'symlink.txt');

      // Create target file within allowed directory
      await fs.writeFile(targetFile, 'TARGET CONTENT', 'utf-8');

      // Create symlink pointing to allowed file
      await fs.symlink(targetFile, symlinkPath);

      // Opening symlink with w follows it to the target
      const fd = await fs.open(symlinkPath, 'w');
      try {
        await fd.write('UPDATED VIA SYMLINK', 0, 'utf-8');
      } finally {
        await fd.close();
      }

      // Both symlink and target should show updated content
      const symlinkContent = await fs.readFile(symlinkPath, 'utf-8');
      const targetContent = await fs.readFile(targetFile, 'utf-8');
      expect(symlinkContent).toBe('UPDATED VIA SYMLINK');
      expect(targetContent).toBe('UPDATED VIA SYMLINK');
    });

    it('should prevent overwriting files through symlinks pointing outside allowed directories', async () => {
      const symlinkSupported = await getSymlinkSupport();
      if (!symlinkSupported) {
        console.log('   ⏭️  Skipping symlink overwrite prevention test - symlinks not supported');
        return;
      }

      const allowed = [allowedDir];
      const legitFile = path.join(allowedDir, 'existing.txt');
      const targetFile = path.join(forbiddenDir, 'target.txt');

      // Create a legitimate file first
      await fs.writeFile(legitFile, 'LEGIT CONTENT', 'utf-8');

      // Create target file in forbidden directory
      await fs.writeFile(targetFile, 'FORBIDDEN CONTENT', 'utf-8');

      // Now replace the legitimate file with a symlink to forbidden location
      await fs.unlink(legitFile);
      await fs.symlink(targetFile, legitFile);

      // Simulate the server's validation logic
      const stats = await fs.lstat(legitFile);
      expect(stats.isSymbolicLink()).toBe(true);

      const realPath = await fs.realpath(legitFile);
      expect(isPathWithinAllowedDirectories(realPath, allowed)).toBe(false);

      // With atomic rename, symlinks are replaced rather than followed,
      // so this test demonstrates the protection.

      // Verify content remains unchanged
      const targetContent = await fs.readFile(targetFile, 'utf-8');
      expect(targetContent).toBe('FORBIDDEN CONTENT');
    });

    it('demonstrates race condition in read operations', async () => {
      const symlinkSupported = await getSymlinkSupport();
      if (!symlinkSupported) {
        console.log('   ⏭️  Skipping race condition in read operations test - symlinks not supported');
        return;
      }

      const allowed = [allowedDir];
      const legitFile = path.join(allowedDir, 'readable.txt');
      const secretFile = path.join(forbiddenDir, 'secret.txt');

      // Create legitimate file
      await fs.writeFile(legitFile, 'PUBLIC CONTENT', 'utf-8');

      // Create secret file in forbidden directory
      await fs.writeFile(secretFile, 'SECRET CONTENT', 'utf-8');

      // Step 1: validatePath would pass for legitimate file
      expect(isPathWithinAllowedDirectories(legitFile, allowed)).toBe(true);

      // Step 2: Race condition - replace file with symlink after validation
      await fs.unlink(legitFile);
      await fs.symlink(secretFile, legitFile);

      // Step 3: Read operation follows symlink to forbidden location
      const content = await fs.readFile(legitFile, 'utf-8');

      // This shows the vulnerability - we read forbidden content
      expect(content).toBe('SECRET CONTENT');
      expect(isPathWithinAllowedDirectories(await fs.realpath(legitFile), allowed)).toBe(false);
    });

    it('verifies rename does not follow symlinks', async () => {
      const symlinkSupported = await getSymlinkSupport();
      if (!symlinkSupported) {
        console.log('   ⏭️  Skipping rename symlink test - symlinks not supported');
        return;
      }

      const allowed = [allowedDir];
      const tempFile = path.join(allowedDir, 'temp.txt');
      const targetSymlink = path.join(allowedDir, 'target-symlink.txt');
      const forbiddenTarget = path.join(forbiddenDir, 'forbidden-target.txt');

      // Create forbidden target
      await fs.writeFile(forbiddenTarget, 'ORIGINAL CONTENT', 'utf-8');

      // Create symlink pointing to forbidden location
      await fs.symlink(forbiddenTarget, targetSymlink);

      // Write temp file
      await fs.writeFile(tempFile, 'NEW CONTENT', 'utf-8');

      // Rename temp file to symlink path
      await fs.rename(tempFile, targetSymlink);

      // Check what happened
      const symlinkExists = await fs.lstat(targetSymlink).then(() => true).catch(() => false);
      const isSymlink = symlinkExists && (await fs.lstat(targetSymlink)).isSymbolicLink();
      const targetContent = await fs.readFile(targetSymlink, 'utf-8');
      const forbiddenContent = await fs.readFile(forbiddenTarget, 'utf-8');

      // Rename should replace the symlink with a regular file
      expect(isSymlink).toBe(false);
      expect(targetContent).toBe('NEW CONTENT');
      expect(forbiddenContent).toBe('ORIGINAL CONTENT'); // Unchanged
    });
  });
});

```
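
The suite above encodes the validator's contract: inputs must be non-empty strings without null bytes, paths are resolved (collapsing `.`, `..`, repeated and trailing separators) before comparison, and containment means an exact match or a strict child of an allowed directory, with a separator check to rule out `/home/user/project2`-style prefix collisions. A reduction of that contract (illustrative; the shipped implementation is in `path-validation.ts`):

```ts
import * as path from 'path';

// Illustrative reduction of the tested contract, not the shipped validator.
function isWithinAllowedSketch(requested: unknown, allowed: unknown): boolean {
  if (typeof requested !== 'string' || requested === '') return false;
  if (!Array.isArray(allowed) || allowed.length === 0) return false;
  if (requested.includes('\0')) return false; // null bytes rejected outright

  const resolved = path.resolve(requested); // collapses '.', '..', '//', trailing '/'
  return allowed.some((dir) => {
    if (typeof dir !== 'string' || dir === '' || dir.includes('\0')) return false;
    const base = path.resolve(dir);
    // Exact match, or a strict child: appending the separator blocks
    // '/home/user/project2' from matching an allowed '/home/user/project'.
    const prefix = base.endsWith(path.sep) ? base : base + path.sep;
    return resolved === base || resolved.startsWith(prefix);
  });
}
```

Note that, as the symlink and race-condition tests demonstrate, this string-level check is necessary but not sufficient: callers must also resolve symlinks (`fs.realpath`) and use exclusive flags (`wx`) or atomic renames to close the gap between validation and the actual file operation.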

## /src/filesystem/__tests__/roots-utils.test.ts

```ts path="/src/filesystem/__tests__/roots-utils.test.ts" 
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { getValidRootDirectories } from '../roots-utils.js';
import { mkdtempSync, rmSync, mkdirSync, writeFileSync, realpathSync } from 'fs';
import { tmpdir } from 'os';
import { join } from 'path';
import type { Root } from '@modelcontextprotocol/sdk/types.js';

describe('getValidRootDirectories', () => {
  let testDir1: string;
  let testDir2: string;
  let testDir3: string;
  let testFile: string;

  beforeEach(() => {
    // Create test directories
    testDir1 = realpathSync(mkdtempSync(join(tmpdir(), 'mcp-roots-test1-')));
    testDir2 = realpathSync(mkdtempSync(join(tmpdir(), 'mcp-roots-test2-')));
    testDir3 = realpathSync(mkdtempSync(join(tmpdir(), 'mcp-roots-test3-')));

    // Create a test file (not a directory)
    testFile = join(testDir1, 'test-file.txt');
    writeFileSync(testFile, 'test content');
  });

  afterEach(() => {
    // Cleanup
    rmSync(testDir1, { recursive: true, force: true });
    rmSync(testDir2, { recursive: true, force: true });
    rmSync(testDir3, { recursive: true, force: true });
  });

  describe('valid directory processing', () => {
    it('should process all URI formats and edge cases', async () => {
      const roots = [
        { uri: `file://${testDir1}`, name: 'File URI' },
        { uri: testDir2, name: 'Plain path' },
        { uri: testDir3 } // Plain path without name property
      ];

      const result = await getValidRootDirectories(roots);

      expect(result).toContain(testDir1);
      expect(result).toContain(testDir2);
      expect(result).toContain(testDir3);
      expect(result).toHaveLength(3);
    });

    it('should normalize complex paths', async () => {
      const subDir = join(testDir1, 'subdir');
      mkdirSync(subDir);
      
      const roots = [
        { uri: `file://${testDir1}/./subdir/../subdir`, name: 'Complex Path' }
      ];

      const result = await getValidRootDirectories(roots);

      expect(result).toHaveLength(1);
      expect(result[0]).toBe(subDir);
    });
  });

  describe('error handling', () => {

    it('should handle various error types', async () => {
      const nonExistentDir = join(tmpdir(), 'non-existent-directory-12345');
      const invalidPath = '\0invalid\0path'; // Null bytes cause different error types
      const roots = [
        { uri: `file://${testDir1}`, name: 'Valid Dir' },
        { uri: `file://${nonExistentDir}`, name: 'Non-existent Dir' },
        { uri: `file://${testFile}`, name: 'File Not Dir' },
        { uri: `file://${invalidPath}`, name: 'Invalid Path' }
      ];

      const result = await getValidRootDirectories(roots);

      expect(result).toContain(testDir1);
      expect(result).not.toContain(nonExistentDir);
      expect(result).not.toContain(testFile);
      expect(result).not.toContain(invalidPath);
      expect(result).toHaveLength(1);
    });
  });
});
```

## /src/filesystem/lib.ts

```ts path="/src/filesystem/lib.ts" 
import fs from "fs/promises";
import path from "path";
import { randomBytes } from 'crypto';
import { createTwoFilesPatch } from 'diff';
import { minimatch } from 'minimatch';
import { normalizePath, expandHome } from './path-utils.js';
import { isPathWithinAllowedDirectories } from './path-validation.js';

// Global allowed directories - set by the main module
let allowedDirectories: string[] = [];

// Function to set allowed directories from the main module
export function setAllowedDirectories(directories: string[]): void {
  allowedDirectories = [...directories];
}

// Function to get current allowed directories
export function getAllowedDirectories(): string[] {
  return [...allowedDirectories];
}

// Type definitions
interface FileInfo {
  size: number;
  created: Date;
  modified: Date;
  accessed: Date;
  isDirectory: boolean;
  isFile: boolean;
  permissions: string;
}

export interface SearchOptions {
  excludePatterns?: string[];
}

export interface SearchResult {
  path: string;
  isDirectory: boolean;
}

// Pure Utility Functions
export function formatSize(bytes: number): string {
  const units = ['B', 'KB', 'MB', 'GB', 'TB'];
  if (bytes === 0) return '0 B';
  
  const i = Math.floor(Math.log(bytes) / Math.log(1024));
  
  if (i <= 0) return `${bytes} ${units[0]}`;
  
  const unitIndex = Math.min(i, units.length - 1);
  return `${(bytes / Math.pow(1024, unitIndex)).toFixed(2)} ${units[unitIndex]}`;
}

export function normalizeLineEndings(text: string): string {
  return text.replace(/\r\n/g, '\n');
}

export function createUnifiedDiff(originalContent: string, newContent: string, filepath: string = 'file'): string {
  // Ensure consistent line endings for diff
  const normalizedOriginal = normalizeLineEndings(originalContent);
  const normalizedNew = normalizeLineEndings(newContent);

  return createTwoFilesPatch(
    filepath,
    filepath,
    normalizedOriginal,
    normalizedNew,
    'original',
    'modified'
  );
}

// Security & Validation Functions
export async function validatePath(requestedPath: string): Promise<string> {
  const expandedPath = expandHome(requestedPath);
  const absolute = path.isAbsolute(expandedPath)
    ? path.resolve(expandedPath)
    : path.resolve(process.cwd(), expandedPath);

  const normalizedRequested = normalizePath(absolute);

  // Security: Check if path is within allowed directories before any file operations
  const isAllowed = isPathWithinAllowedDirectories(normalizedRequested, allowedDirectories);
  if (!isAllowed) {
    throw new Error(`Access denied - path outside allowed directories: ${absolute} not in ${allowedDirectories.join(', ')}`);
  }

  // Security: Handle symlinks by checking their real path to prevent symlink attacks
  // This prevents attackers from creating symlinks that point outside allowed directories
  try {
    const realPath = await fs.realpath(absolute);
    const normalizedReal = normalizePath(realPath);
    if (!isPathWithinAllowedDirectories(normalizedReal, allowedDirectories)) {
      throw new Error(`Access denied - symlink target outside allowed directories: ${realPath} not in ${allowedDirectories.join(', ')}`);
    }
    return realPath;
  } catch (error) {
    // Security: For new files that don't exist yet, verify parent directory
    // This ensures we can't create files in unauthorized locations
    if ((error as NodeJS.ErrnoException).code === 'ENOENT') {
      const parentDir = path.dirname(absolute);
      try {
        const realParentPath = await fs.realpath(parentDir);
        const normalizedParent = normalizePath(realParentPath);
        if (!isPathWithinAllowedDirectories(normalizedParent, allowedDirectories)) {
          throw new Error(`Access denied - parent directory outside allowed directories: ${realParentPath} not in ${allowedDirectories.join(', ')}`);
        }
        return absolute;
      } catch {
        throw new Error(`Parent directory does not exist: ${parentDir}`);
      }
    }
    throw error;
  }
}


// File Operations
export async function getFileStats(filePath: string): Promise<FileInfo> {
  const stats = await fs.stat(filePath);
  return {
    size: stats.size,
    created: stats.birthtime,
    modified: stats.mtime,
    accessed: stats.atime,
    isDirectory: stats.isDirectory(),
    isFile: stats.isFile(),
    permissions: stats.mode.toString(8).slice(-3),
  };
}

export async function readFileContent(filePath: string, encoding: string = 'utf-8'): Promise<string> {
  return await fs.readFile(filePath, encoding as BufferEncoding);
}

export async function writeFileContent(filePath: string, content: string): Promise<void> {
  try {
    // Security: 'wx' flag ensures exclusive creation - fails if file/symlink exists,
    // preventing writes through pre-existing symlinks
    await fs.writeFile(filePath, content, { encoding: "utf-8", flag: 'wx' });
  } catch (error) {
    if ((error as NodeJS.ErrnoException).code === 'EEXIST') {
      // Security: Use atomic rename to prevent race conditions where symlinks
      // could be created between validation and write. Rename operations
      // replace the target file atomically and don't follow symlinks.
      const tempPath = `${filePath}.${randomBytes(16).toString('hex')}.tmp`;
      try {
        await fs.writeFile(tempPath, content, 'utf-8');
        await fs.rename(tempPath, filePath);
      } catch (renameError) {
        try {
          await fs.unlink(tempPath);
        } catch {}
        throw renameError;
      }
    } else {
      throw error;
    }
  }
}


// File Editing Functions
interface FileEdit {
  oldText: string;
  newText: string;
}

export async function applyFileEdits(
  filePath: string,
  edits: FileEdit[],
  dryRun: boolean = false
): Promise<string> {
  // Read file content and normalize line endings
  const content = normalizeLineEndings(await fs.readFile(filePath, 'utf-8'));

  // Apply edits sequentially
  let modifiedContent = content;
  for (const edit of edits) {
    const normalizedOld = normalizeLineEndings(edit.oldText);
    const normalizedNew = normalizeLineEndings(edit.newText);

    // If exact match exists, use it
    if (modifiedContent.includes(normalizedOld)) {
      modifiedContent = modifiedContent.replace(normalizedOld, normalizedNew);
      continue;
    }

    // Otherwise, try line-by-line matching with flexibility for whitespace
    const oldLines = normalizedOld.split('\n');
    const contentLines = modifiedContent.split('\n');
    let matchFound = false;

    for (let i = 0; i <= contentLines.length - oldLines.length; i++) {
      const potentialMatch = contentLines.slice(i, i + oldLines.length);

      // Compare lines with normalized whitespace
      const isMatch = oldLines.every((oldLine, j) => {
        const contentLine = potentialMatch[j];
        return oldLine.trim() === contentLine.trim();
      });

      if (isMatch) {
        // Preserve original indentation of first line
        const originalIndent = contentLines[i].match(/^\s*/)?.[0] || '';
        const newLines = normalizedNew.split('\n').map((line, j) => {
          if (j === 0) return originalIndent + line.trimStart();
          // For subsequent lines, try to preserve relative indentation
          const oldIndent = oldLines[j]?.match(/^\s*/)?.[0] || '';
          const newIndent = line.match(/^\s*/)?.[0] || '';
          if (oldIndent && newIndent) {
            const relativeIndent = newIndent.length - oldIndent.length;
            return originalIndent + ' '.repeat(Math.max(0, relativeIndent)) + line.trimStart();
          }
          return line;
        });

        contentLines.splice(i, oldLines.length, ...newLines);
        modifiedContent = contentLines.join('\n');
        matchFound = true;
        break;
      }
    }

    if (!matchFound) {
      throw new Error(`Could not find exact match for edit:\n${edit.oldText}`);
    }
  }

  // Create unified diff
  const diff = createUnifiedDiff(content, modifiedContent, filePath);

  // Format diff with appropriate number of backticks
  let numBackticks = 3;
  while (diff.includes('`'.repeat(numBackticks))) {
    numBackticks++;
  }
  const formattedDiff = `${'`'.repeat(numBackticks)}diff\n${diff}${'`'.repeat(numBackticks)}\n\n`;

  if (!dryRun) {
    // Security: Use atomic rename to prevent race conditions where symlinks
    // could be created between validation and write. Rename operations
    // replace the target file atomically and don't follow symlinks.
    const tempPath = `${filePath}.${randomBytes(16).toString('hex')}.tmp`;
    try {
      await fs.writeFile(tempPath, modifiedContent, 'utf-8');
      await fs.rename(tempPath, filePath);
    } catch (error) {
      try {
        await fs.unlink(tempPath);
      } catch {}
      throw error;
    }
  }

  return formattedDiff;
}

// Memory-efficient implementation to get the last N lines of a file
export async function tailFile(filePath: string, numLines: number): Promise<string> {
  const CHUNK_SIZE = 1024; // Read 1KB at a time
  const stats = await fs.stat(filePath);
  const fileSize = stats.size;
  
  if (fileSize === 0) return '';
  
  // Open file for reading
  const fileHandle = await fs.open(filePath, 'r');
  try {
    const lines: string[] = [];
    let position = fileSize;
    let chunk = Buffer.alloc(CHUNK_SIZE);
    let linesFound = 0;
    let remainingText = '';
    
    // Read chunks from the end of the file until we have enough lines
    while (position > 0 && linesFound < numLines) {
      const size = Math.min(CHUNK_SIZE, position);
      position -= size;
      
      const { bytesRead } = await fileHandle.read(chunk, 0, size, position);
      if (!bytesRead) break;
      
      // Get the chunk as a string and prepend any remaining text from previous iteration
      const readData = chunk.slice(0, bytesRead).toString('utf-8');
      const chunkText = readData + remainingText;
      
      // Split by newlines and count
      const chunkLines = normalizeLineEndings(chunkText).split('\n');
      
      // If this isn't the end of the file, the first line is likely incomplete
      // Save it to prepend to the next chunk
      if (position > 0) {
        remainingText = chunkLines[0];
        chunkLines.shift(); // Remove the first (incomplete) line
      }
      
      // Add lines to our result (up to the number we need)
      for (let i = chunkLines.length - 1; i >= 0 && linesFound < numLines; i--) {
        lines.unshift(chunkLines[i]);
        linesFound++;
      }
    }
    
    return lines.join('\n');
  } finally {
    await fileHandle.close();
  }
}

// Memory-efficient implementation to get the first N lines of a file
export async function headFile(filePath: string, numLines: number): Promise<string> {
  const fileHandle = await fs.open(filePath, 'r');
  try {
    const lines: string[] = [];
    let buffer = '';
    let bytesRead = 0;
    const chunk = Buffer.alloc(1024); // 1KB buffer
    
    // Read chunks and count lines until we have enough or reach EOF
    while (lines.length < numLines) {
      const result = await fileHandle.read(chunk, 0, chunk.length, bytesRead);
      if (result.bytesRead === 0) break; // End of file
      bytesRead += result.bytesRead;
      buffer += chunk.slice(0, result.bytesRead).toString('utf-8');
      
      const newLineIndex = buffer.lastIndexOf('\n');
      if (newLineIndex !== -1) {
        const completeLines = buffer.slice(0, newLineIndex).split('\n');
        buffer = buffer.slice(newLineIndex + 1);
        for (const line of completeLines) {
          lines.push(line);
          if (lines.length >= numLines) break;
        }
      }
    }
    
    // If there is leftover content and we still need lines, add it
    if (buffer.length > 0 && lines.length < numLines) {
      lines.push(buffer);
    }
    
    return lines.join('\n');
  } finally {
    await fileHandle.close();
  }
}

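// Recursively walks rootPath, collecting entries whose path relative to rootPath
// matches the glob pattern; entries that fail validation or match an exclude
// pattern are skipped silently.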
export async function searchFilesWithValidation(
  rootPath: string,
  pattern: string,
  allowedDirectories: string[],
  options: SearchOptions = {}
): Promise<string[]> {
  const { excludePatterns = [] } = options;
  const results: string[] = [];

  async function search(currentPath: string) {
    const entries = await fs.readdir(currentPath, { withFileTypes: true });

    for (const entry of entries) {
      const fullPath = path.join(currentPath, entry.name);

      try {
        await validatePath(fullPath);

        const relativePath = path.relative(rootPath, fullPath);
        const shouldExclude = excludePatterns.some(excludePattern =>
          minimatch(relativePath, excludePattern, { dot: true })
        );

        if (shouldExclude) continue;

        // Use glob matching for the search pattern
        if (minimatch(relativePath, pattern, { dot: true })) {
          results.push(fullPath);
        }

        if (entry.isDirectory()) {
          await search(fullPath);
        }
      } catch {
        continue;
      }
    }
  }

  await search(rootPath);
  return results;
}

```
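
The helpers above are meant to be composed: register the allowlist once at startup, validate every incoming path, then perform the file operation. A minimal sketch, assuming a hypothetical `/tmp/sandbox` root (not part of the repository):

```ts
import { setAllowedDirectories, validatePath, writeFileContent, applyFileEdits } from './lib.js';

async function demo(): Promise<void> {
  // Hypothetical allowlist root; any path outside it will be rejected.
  setAllowedDirectories(['/tmp/sandbox']);

  // Resolves symlinks and throws if the real path escapes the allowlist.
  const safePath = await validatePath('/tmp/sandbox/notes.txt');

  // Exclusive create, falling back to an atomic temp-file rename if the file exists.
  await writeFileContent(safePath, 'first draft\n');

  // dryRun = true returns the unified diff without touching the file.
  const diff = await applyFileEdits(safePath, [{ oldText: 'first draft', newText: 'final draft' }], true);
  console.log(diff);
}

demo().catch(console.error);
```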

## /src/filesystem/package.json

```json path="/src/filesystem/package.json" 
{
  "name": "@modelcontextprotocol/server-filesystem",
  "version": "0.6.3",
  "description": "MCP server for filesystem access",
  "license": "MIT",
  "author": "Anthropic, PBC (https://anthropic.com)",
  "homepage": "https://modelcontextprotocol.io",
  "bugs": "https://github.com/modelcontextprotocol/servers/issues",
  "type": "module",
  "bin": {
    "mcp-server-filesystem": "dist/index.js"
  },
  "files": [
    "dist"
  ],
  "scripts": {
    "build": "tsc && shx chmod +x dist/*.js",
    "prepare": "npm run build",
    "watch": "tsc --watch",
    "test": "vitest run --coverage"
  },
  "dependencies": {
    "@modelcontextprotocol/sdk": "^1.19.1",
    "diff": "^5.1.0",
    "glob": "^10.3.10",
    "minimatch": "^10.0.1",
    "zod-to-json-schema": "^3.23.5"
  },
  "devDependencies": {
    "@types/diff": "^5.0.9",
    "@types/minimatch": "^5.1.2",
    "@types/node": "^22",
    "@vitest/coverage-v8": "^2.1.8",
    "shx": "^0.3.4",
    "typescript": "^5.8.2",
    "vitest": "^2.1.8"
  }
}

```

## /src/filesystem/path-utils.ts

```ts path="/src/filesystem/path-utils.ts" 
import path from "path";
import os from 'os';

/**
 * Converts Unix-style Windows paths (/c/...) to Windows format on Windows.
 * WSL paths (/mnt/...) are deliberately left unchanged so fs operations keep
 * working inside WSL; standard Windows paths get their slashes normalized.
 * @param p The path to convert
 * @returns The converted path, or the input unchanged if no rule applies
 */
export function convertToWindowsPath(p: string): string {
  // Handle WSL paths (/mnt/c/...)
  // NEVER convert WSL paths - they are valid Linux paths that work with Node.js fs operations in WSL
  // Converting them to Windows format (C:\...) breaks fs operations inside WSL
  if (p.startsWith('/mnt/')) {
    return p; // Leave WSL paths unchanged
  }

  // Handle Unix-style Windows paths (/c/...)
  // Only convert when running on Windows
  if (p.match(/^\/[a-zA-Z]\//) && process.platform === 'win32') {
    const driveLetter = p.charAt(1).toUpperCase();
    const pathPart = p.slice(2).replace(/\//g, '\\');
    return `${driveLetter}:${pathPart}`;
  }

  // Handle standard Windows paths, ensuring backslashes
  if (p.match(/^[a-zA-Z]:/)) {
    return p.replace(/\//g, '\\');
  }

  // Leave non-Windows paths unchanged
  return p;
}

/**
 * Normalizes path by standardizing format while preserving OS-specific behavior
 * @param p The path to normalize
 * @returns Normalized path
 */
export function normalizePath(p: string): string {
  // Remove any surrounding quotes and whitespace
  p = p.trim().replace(/^["']|["']$/g, '');

  // Check if this is a Unix path that should not be converted
  // WSL paths (/mnt/) should ALWAYS be preserved as they work correctly in WSL with Node.js fs
  // Regular Unix paths should also be preserved
  const isUnixPath = p.startsWith('/') && (
    // Always preserve WSL paths (/mnt/c/, /mnt/d/, etc.)
    p.match(/^\/mnt\/[a-z]\//i) ||
    // On non-Windows platforms, treat all absolute paths as Unix paths
    (process.platform !== 'win32') ||
    // On Windows, preserve Unix paths that aren't Unix-style Windows paths (/c/, /d/, etc.)
    (process.platform === 'win32' && !p.match(/^\/[a-zA-Z]\//))
  );

  if (isUnixPath) {
    // For Unix paths, just normalize without converting to Windows format
    // Replace double slashes with single slashes and remove trailing slashes
    return p.replace(/\/+/g, '/').replace(/\/+$/, '');
  }

  // Convert Unix-style Windows paths (/c/, /d/) to Windows format when on Windows;
  // convertToWindowsPath leaves /mnt/ paths unchanged
  p = convertToWindowsPath(p);

  // Handle double backslashes, preserving leading UNC \\
  if (p.startsWith('\\\\')) {
    // For UNC paths, first normalize any excessive leading backslashes to exactly \\
    // Then normalize double backslashes in the rest of the path
    let uncPath = p;
    // Replace multiple leading backslashes with exactly two
    uncPath = uncPath.replace(/^\\{2,}/, '\\\\');
    // Now normalize any remaining double backslashes in the rest of the path
    const restOfPath = uncPath.substring(2).replace(/\\\\/g, '\\');
    p = '\\\\' + restOfPath;
  } else {
    // For non-UNC paths, normalize all double backslashes
    p = p.replace(/\\\\/g, '\\');
  }

  // Use Node's path normalization, which handles . and .. segments
  let normalized = path.normalize(p);

  // Fix UNC paths after normalization (path.normalize can remove a leading backslash)
  if (p.startsWith('\\\\') && !normalized.startsWith('\\\\')) {
    normalized = '\\' + normalized;
  }

  // Handle Windows paths: convert slashes and ensure drive letter is capitalized
  if (normalized.match(/^[a-zA-Z]:/)) {
    let result = normalized.replace(/\//g, '\\');
    // Capitalize drive letter if present
    if (/^[a-z]:/.test(result)) {
      result = result.charAt(0).toUpperCase() + result.slice(1);
    }
    return result;
  }

  // On Windows, convert forward slashes to backslashes for relative paths
  // On Linux/Unix, preserve forward slashes
  if (process.platform === 'win32') {
    return normalized.replace(/\//g, '\\');
  }

  // On non-Windows platforms, keep the normalized path as-is
  return normalized;
}

/**
 * Expands home directory tildes in paths
 * @param filepath The path to expand
 * @returns Expanded path
 */
export function expandHome(filepath: string): string {
  if (filepath.startsWith('~/') || filepath === '~') {
    return path.join(os.homedir(), filepath.slice(1));
  }
  return filepath;
}

```
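
Normalization is deliberately platform-sensitive, so the same input can come out differently on Windows and POSIX. A few illustrative calls with hypothetical paths (expected results follow from the rules above; platform noted where it matters):

```ts
import { convertToWindowsPath, normalizePath, expandHome } from './path-utils.js';

convertToWindowsPath('/mnt/c/Users/dev'); // '/mnt/c/Users/dev' — WSL paths are never converted
convertToWindowsPath('/c/Users/dev');     // 'C:\Users\dev' on win32; unchanged elsewhere
normalizePath('"C:/Users//dev"');         // 'C:\Users\dev' — quotes stripped, slashes converted
normalizePath('/home//dev/');             // '/home/dev' on POSIX — duplicate and trailing slashes removed
expandHome('~/projects');                 // os.homedir() + '/projects'
```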

## /src/filesystem/path-validation.ts

```ts path="/src/filesystem/path-validation.ts" 
import path from 'path';

/**
 * Checks if an absolute path is within any of the allowed directories.
 * 
 * @param absolutePath - The absolute path to check (will be normalized)
 * @param allowedDirectories - Array of absolute allowed directory paths (will be normalized)
 * @returns true if the path is within an allowed directory, false otherwise
 * @throws Error if given relative paths after normalization
 */
export function isPathWithinAllowedDirectories(absolutePath: string, allowedDirectories: string[]): boolean {
  // Type validation
  if (typeof absolutePath !== 'string' || !Array.isArray(allowedDirectories)) {
    return false;
  }

  // Reject empty inputs
  if (!absolutePath || allowedDirectories.length === 0) {
    return false;
  }

  // Reject null bytes (forbidden in paths)
  if (absolutePath.includes('\x00')) {
    return false;
  }

  // Normalize the input path
  let normalizedPath: string;
  try {
    normalizedPath = path.resolve(path.normalize(absolutePath));
  } catch {
    return false;
  }

  // Verify it's absolute after normalization
  if (!path.isAbsolute(normalizedPath)) {
    throw new Error('Path must be absolute after normalization');
  }

  // Check against each allowed directory
  return allowedDirectories.some(dir => {
    if (typeof dir !== 'string' || !dir) {
      return false;
    }

    // Reject null bytes in allowed dirs
    if (dir.includes('\x00')) {
      return false;
    }

    // Normalize the allowed directory
    let normalizedDir: string;
    try {
      normalizedDir = path.resolve(path.normalize(dir));
    } catch {
      return false;
    }

    // Verify allowed directory is absolute after normalization
    if (!path.isAbsolute(normalizedDir)) {
      throw new Error('Allowed directories must be absolute paths after normalization');
    }

    // Check if normalizedPath is within normalizedDir
    // Path is inside if it's the same or a subdirectory
    if (normalizedPath === normalizedDir) {
      return true;
    }
    
    // Special case for the filesystem root to avoid producing a double separator
    if (normalizedDir === path.sep) {
      return normalizedPath.startsWith(path.sep);
    }
    
    // On Windows, also check for drive root (e.g., "C:\")
    if (path.sep === '\\' && normalizedDir.match(/^[A-Za-z]:\\?$/)) {
      // Ensure both paths are on the same drive
      const dirDrive = normalizedDir.charAt(0).toLowerCase();
      const pathDrive = normalizedPath.charAt(0).toLowerCase();
      return pathDrive === dirDrive && normalizedPath.startsWith(normalizedDir.replace(/\\?$/, '\\'));
    }
    
    return normalizedPath.startsWith(normalizedDir + path.sep);
  });
}

```
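
The containment test is segment-aware rather than a plain string-prefix check, so sibling directories that merely share a name prefix do not pass. Hypothetical POSIX examples:

```ts
import { isPathWithinAllowedDirectories } from './path-validation.js';

isPathWithinAllowedDirectories('/tmp/safe/notes.txt', ['/tmp/safe']); // true  — inside the allowed dir
isPathWithinAllowedDirectories('/tmp/safe', ['/tmp/safe']);           // true  — exact match counts
isPathWithinAllowedDirectories('/tmp/safe-evil/x', ['/tmp/safe']);    // false — shared prefix is not containment
isPathWithinAllowedDirectories('/tmp/safe/../etc', ['/tmp/safe']);    // false — '..' is resolved before checking
```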

## /src/filesystem/roots-utils.ts

```ts path="/src/filesystem/roots-utils.ts" 
import { promises as fs, type Stats } from 'fs';
import path from 'path';
import os from 'os';
import { normalizePath } from './path-utils.js';
import type { Root } from '@modelcontextprotocol/sdk/types.js';

/**
 * Converts a root URI to a normalized directory path with basic security validation.
 * @param rootUri - File URI (file://...) or plain directory path
 * @returns Promise resolving to validated path or null if invalid
 */
async function parseRootUri(rootUri: string): Promise<string | null> {
  try {
    const rawPath = rootUri.startsWith('file://') ? rootUri.slice(7) : rootUri;
    const expandedPath = rawPath.startsWith('~/') || rawPath === '~' 
      ? path.join(os.homedir(), rawPath.slice(1)) 
      : rawPath;
    const absolutePath = path.resolve(expandedPath);
    const resolvedPath = await fs.realpath(absolutePath);
    return normalizePath(resolvedPath);
  } catch {
    return null; // Path doesn't exist or other error
  }
}

/**
 * Formats error message for directory validation failures.
 * @param dir - Directory path that failed validation
 * @param error - Error that occurred during validation
 * @param reason - Specific reason for failure
 * @returns Formatted error message
 */
function formatDirectoryError(dir: string, error?: unknown, reason?: string): string {
  if (reason) {
    return `Skipping ${reason}: ${dir}`;
  }
  const message = error instanceof Error ? error.message : String(error);
  return `Skipping invalid directory: ${dir} due to error: ${message}`;
}

/**
 * Resolves requested root directories from MCP root specifications.
 * 
 * Converts root URI specifications (file:// URIs or plain paths) into normalized
 * directory paths, validating that each path exists and is a directory.
 * Includes symlink resolution for security.
 * 
 * @param requestedRoots - Array of root specifications with URI and optional name
 * @returns Promise resolving to array of validated directory paths
 */
export async function getValidRootDirectories(
  requestedRoots: readonly Root[]
): Promise<string[]> {
  const validatedDirectories: string[] = [];
  
  for (const requestedRoot of requestedRoots) {
    const resolvedPath = await parseRootUri(requestedRoot.uri);
    if (!resolvedPath) {
      console.error(formatDirectoryError(requestedRoot.uri, undefined, 'invalid path or inaccessible'));
      continue;
    }
    
    try {
      const stats: Stats = await fs.stat(resolvedPath);
      if (stats.isDirectory()) {
        validatedDirectories.push(resolvedPath);
      } else {
        console.error(formatDirectoryError(resolvedPath, undefined, 'non-directory root'));
      }
    } catch (error) {
      console.error(formatDirectoryError(resolvedPath, error));
    }
  }
  
  return validatedDirectories;
}
```
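
Callers can mix file:// URIs, tilde paths, and plain paths; anything that fails realpath or is not a directory is dropped with an error logged to stderr. A sketch with hypothetical roots:

```ts
import { getValidRootDirectories } from './roots-utils.js';

async function resolveRoots(): Promise<void> {
  const dirs = await getValidRootDirectories([
    { uri: 'file:///home/dev/project', name: 'Project' }, // file:// URI
    { uri: '~/workspace' },                               // tilde expands to the home directory
    { uri: '/no/such/dir' },                              // dropped: realpath fails
  ]);
  console.log(dirs); // e.g. ['/home/dev/project', '/home/dev/workspace']
}

resolveRoots().catch(console.error);
```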

## /src/filesystem/tsconfig.json

```json path="/src/filesystem/tsconfig.json" 
{
  "extends": "../../tsconfig.json",
  "compilerOptions": {
    "outDir": "./dist",
    "rootDir": ".",
    "moduleResolution": "NodeNext",
    "module": "NodeNext"
  },
  "include": [
    "./**/*.ts"
  ]
}

```

## /src/git/.gitignore

```gitignore path="/src/git/.gitignore" 
__pycache__
.venv

```

## /src/git/.python-version

```python-version path="/src/git/.python-version" 
3.10

```

