Skip to content

Testing Guide

Code Scalpel maintains strict testing standards with 95%+ coverage.

Test Framework

| Tool | Purpose |
|----------------|-------------------|
| pytest | Test runner |
| pytest-cov | Coverage reporting |
| pytest-asyncio | Async test support |

Running Tests

Full Test Suite

pytest tests/

With Coverage

pytest --cov=src/code_scalpel --cov-report=html tests/

# View report
open htmlcov/index.html  # Mac
xdg-open htmlcov/index.html  # Linux

Specific Tests

# Single file
pytest tests/tools/test_analyze.py

# Single test
pytest tests/tools/test_analyze.py::test_analyze_python

# By pattern
pytest -k "security" tests/

# By marker
pytest -m "not slow" tests/

Test Organization

tests/
├── conftest.py          # Shared fixtures
├── tools/               # Tool tests
│   ├── test_analyze.py
│   ├── test_extract.py
│   └── tiers/           # Tier-specific tests
│       ├── conftest.py  # Tier fixtures
│       └── test_*.py
├── core/                # Core engine tests
├── parsers/             # Parser tests
└── integration/         # Integration tests

Writing Tests

Basic Test

import pytest
from code_scalpel.mcp.tools import analyze

@pytest.mark.asyncio
async def test_analyze_python():
    """Test basic Python analysis."""
    code = """
def hello():
    print("Hello, World!")
"""
    result = await analyze.analyze_code(code=code, language="python")

    assert result.tier_applied == "community"
    assert len(result.data["functions"]) == 1
    assert result.data["functions"][0]["name"] == "hello"

Test with Fixtures

@pytest.fixture
def sample_python_code():
    """Provide sample Python code for tests."""
    return """
class Calculator:
    def add(self, a: int, b: int) -> int:
        return a + b

    def subtract(self, a: int, b: int) -> int:
        return a - b
"""

@pytest.mark.asyncio
async def test_extract_method(sample_python_code, tmp_path):
    """Test method extraction."""
    # Create temp file
    file_path = tmp_path / "calculator.py"
    file_path.write_text(sample_python_code)

    result = await extract.extract_code(
        file_path=str(file_path),
        target_name="add",
        target_type="method"
    )

    assert "def add" in result.data["extracted_code"]

Parameterized Tests

@pytest.mark.parametrize("language,expected_functions", [
    ("python", ["hello"]),
    ("javascript", ["hello"]),
    ("typescript", ["hello"]),
])
@pytest.mark.asyncio
async def test_analyze_multiple_languages(language, expected_functions):
    """Test analysis across languages."""
    code_samples = {
        "python": "def hello(): pass",
        "javascript": "function hello() {}",
        "typescript": "function hello(): void {}",
    }

    result = await analyze.analyze_code(
        code=code_samples[language],
        language=language
    )

    function_names = [f["name"] for f in result.data["functions"]]
    assert function_names == expected_functions

Tier Testing

Using Tier Fixtures

# tests/tools/tiers/conftest.py provides these fixtures

@pytest.mark.asyncio
async def test_community_limits(community_tier, tmp_path):
    """Test Community tier respects limits."""
    # community_tier fixture sets CODE_SCALPEL_DISABLE_LICENSE_DISCOVERY=1

    result = await get_call_graph(
        project_root=str(tmp_path),
        depth=100  # Requests more than limit
    )

    assert result.tier_applied == "community"
    assert result.max_depth_applied == 3  # Community limit

@pytest.mark.asyncio
async def test_pro_limits(pro_tier, tmp_path):
    """Test Pro tier has higher limits."""
    # pro_tier fixture sets CODE_SCALPEL_LICENSE_PATH to Pro license

    result = await get_call_graph(
        project_root=str(tmp_path),
        depth=100
    )

    assert result.tier_applied == "pro"
    assert result.max_depth_applied == 50  # Pro limit

License Files

Test licenses are in tests/licenses/ (git-ignored):

tests/licenses/
├── code_scalpel_license_pro_*.jwt
└── code_scalpel_license_enterprise_*.jwt

Don't Mock Tier Detection

Tier detection uses cryptographic JWT validation. Use actual license files via fixtures, not monkeypatching.

Test Markers

# Slow tests (skip with -m "not slow")
@pytest.mark.slow
def test_large_project_crawl():
    ...

# Tests requiring Docker
@pytest.mark.docker
def test_docker_deployment():
    ...

# Integration tests
@pytest.mark.integration
def test_end_to_end():
    ...

Coverage Requirements

  • Minimum: 95% combined (statement + branch)
  • Current: ~94.86%

Check Coverage

# With report
pytest --cov=src/code_scalpel --cov-report=term-missing tests/

# Fail if below threshold
pytest --cov=src/code_scalpel --cov-fail-under=95 tests/

Exclude from Coverage

if TYPE_CHECKING:  # pragma: no cover
    from expensive_import import Type

Test Data

Temporary Files

def test_with_temp_files(tmp_path):
    """Use pytest's tmp_path fixture."""
    file = tmp_path / "test.py"
    file.write_text("def foo(): pass")

    result = analyze_file(str(file))

Test Data Directory

import pytest
from pathlib import Path

@pytest.fixture
def test_data_dir():
    """Return path to test data directory."""
    return Path(__file__).parent / "data"

def test_with_data_file(test_data_dir):
    sample = test_data_dir / "sample.py"
    result = analyze_file(str(sample))

Common Patterns

Testing Async Tools

@pytest.mark.asyncio
async def test_async_tool():
    result = await some_async_tool()
    assert result.data is not None

Testing Error Cases

@pytest.mark.asyncio
async def test_file_not_found():
    result = await extract_code(
        file_path="/nonexistent/file.py",
        target_name="foo",
        target_type="function"
    )

    assert result.error is not None
    assert result.error["code"] == "FILE_NOT_FOUND"

Testing Oracle Recovery

@pytest.mark.asyncio
async def test_fuzzy_match_suggestion():
    result = await extract_code(
        file_path="utils.py",
        target_name="procss_order",  # Typo
        target_type="function"
    )

    assert result.error is not None
    assert result.oracle is not None
    assert result.oracle["suggestions"][0]["name"] == "process_order"

CI Integration

Tests run automatically on:

  • Every push
  • Every pull request
  • Nightly builds

GitHub Actions workflow:

- name: Run Tests
  run: pytest --cov=src/code_scalpel tests/

- name: Check Coverage
  run: pytest --cov-fail-under=95 tests/