Python pytest
Testing patterns with pytest fixtures, parametrize, and mocking.
Test Discovery and Basic Assertions
pytest collects files named test_*.py and functions named test_* inside them automatically — no boilerplate
# test_devices.py -- pytest discovers this file by naming convention
import pytest
def test_device_creation():
    """A freshly constructed Device keeps the attributes it was given."""
    dev = Device(hostname="switch-01", ip="10.50.1.100", vlan=10)
    assert dev.hostname == "switch-01"
    assert dev.vlan == 10
def test_invalid_vlan():
    """Constructing a Device with an out-of-range VLAN raises ValueError."""
    with pytest.raises(ValueError) as excinfo:
        Device(hostname="switch-01", ip="10.50.1.100", vlan=9999)
    # Equivalent to match="out of range": the message must mention the range.
    assert "out of range" in str(excinfo.value)
def test_device_list_not_empty():
    """The inventory query returns at least one device."""
    inventory = get_all_devices()
    # Truthiness catches the empty-list case; the length check makes it explicit.
    assert inventory
    assert len(inventory) >= 1
Run tests — common invocations
pytest # all tests
pytest test_devices.py # single file
pytest test_devices.py::test_device_creation # single test
pytest -v # verbose -- show each test name
pytest -x # stop on first failure
pytest -k "vlan" # run tests matching keyword
pytest --tb=short # shorter tracebacks
Fixtures
Fixtures provide test dependencies — setup, teardown, and shared state
import pytest
@pytest.fixture
def sample_device():
    """Fixture: a throwaway Device, injected into any test naming this argument."""
    dev = Device(hostname="test-sw", ip="10.50.1.200", vlan=10)
    return dev
@pytest.fixture
def db_session():
    """Yield fixture -- code after yield runs as teardown.

    The test body executes while this generator is suspended at ``yield``;
    pytest resumes it afterwards (even if the test failed), so the
    rollback/close pair always runs.
    """
    session = create_test_session()
    yield session
    # Teardown: discard any uncommitted changes, then release the session.
    session.rollback()
    session.close()
def test_device_save(db_session, sample_device):
    """Saving a device makes it visible through the same session."""
    # Both fixtures are resolved by pytest from the argument names.
    db_session.add(sample_device)
    stored = db_session.query(Device).count()
    assert stored == 1
Fixture Scopes
Scope controls how often fixture runs — function (default), class, module, session
import pytest
@pytest.fixture(scope="session")
def app():
"""Created once for entire test suite -- expensive setup like app creation."""
return create_app(testing=True)
@pytest.fixture(scope="module")
def db_pool():
"""Created once per test file -- shared DB connection pool."""
pool = create_pool()
yield pool
pool.close()
@pytest.fixture(scope="function") # default
def clean_db(db_pool):
"""Runs before each test -- ensures clean state."""
db_pool.execute("DELETE FROM devices")
yield db_pool
Parametrize
Run same test with different inputs — multiplies test cases without code duplication
import pytest
@pytest.mark.parametrize("vlan,expected", [
(1, True),
(10, True),
(4094, True),
(0, False),
(4095, False),
(-1, False),
])
def test_valid_vlan(vlan, expected):
assert is_valid_vlan(vlan) == expected
@pytest.mark.parametrize("mac", [
"AA:BB:CC:DD:EE:FF",
"aa:bb:cc:dd:ee:ff",
"00:11:22:33:44:55",
])
def test_normalize_mac(mac):
result = normalize_mac(mac)
assert result == mac.upper()
Markers
skip, xfail, and custom markers — control which tests run and expected outcomes
import pytest
import shutil
# Unconditional skip: collected but never run; reported with the reason.
@pytest.mark.skip(reason="ISE API not available in CI")
def test_ise_integration():
    ...
# Conditional skip: runs only on machines where the `vault` binary is on PATH.
@pytest.mark.skipif(
    not shutil.which("vault"),
    reason="Vault CLI not installed",
)
def test_vault_read():
    ...
# Expected failure: reported as xfail if it fails, xpass if it unexpectedly passes.
@pytest.mark.xfail(reason="Known bug in upstream library -- tracking issue #42")
def test_edge_case():
    ...
# Custom marker -- register in pyproject.toml under [tool.pytest.ini_options] markers
# (unregistered markers trigger PytestUnknownMarkWarning); select with `pytest -m slow`.
@pytest.mark.slow
def test_full_sync():
    ...
Run or skip by marker
pytest -m "not slow" # skip slow tests
pytest -m "slow or integration" # run slow and integration tests
conftest.py
Shared fixtures in conftest.py — pytest discovers them automatically by directory
# conftest.py -- fixtures available to all tests in this directory and below
import pytest
from fastapi.testclient import TestClient
from app.main import app
@pytest.fixture
def client():
    """HTTP test client bound to the FastAPI app, shared via conftest.py."""
    test_client = TestClient(app)
    return test_client
@pytest.fixture
def auth_headers():
    """Header dict carrying a fake bearer token for authenticated requests."""
    headers = {"Authorization": "Bearer test-token-123"}
    return headers
Built-in Fixtures
tmp_path, monkeypatch, capfd — powerful built-ins that replace manual mocking
import pytest
def test_write_config(tmp_path):
    """tmp_path is a per-test pathlib.Path that pytest cleans up automatically."""
    target = tmp_path / "config.json"
    write_config(target, {"hostname": "switch-01"})
    assert target.exists()
    contents = target.read_text()
    assert "switch-01" in contents
def test_env_override(monkeypatch):
    """monkeypatch -- temporarily modify env vars, attributes, dicts.

    Every patch is undone automatically when the test finishes.
    """
    monkeypatch.setenv("DOMUS_DEBUG", "true")
    # Dotted-path string form: imports app.config and patches settings.debug.
    monkeypatch.setattr("app.config.settings.debug", True)
    assert get_debug_mode() is True
def test_cli_output(capfd):
    """capfd captures stdout/stderr at the file-descriptor level (subprocesses too)."""
    print_device_summary()
    out, _err = capfd.readouterr()
    assert "switch-01" in out
FastAPI TestClient Pattern
Full integration test pattern for FastAPI with dependency overrides
import pytest
from fastapi.testclient import TestClient
from app.main import app
from app.dependencies import get_db
def get_test_db():
    """Dependency override: yield a test DB session, closing it no matter what."""
    session = TestSessionLocal()
    try:
        yield session
    finally:
        session.close()
@pytest.fixture
def client():
    """TestClient with the real DB dependency swapped for the test session."""
    app.dependency_overrides[get_db] = get_test_db
    with TestClient(app) as test_client:
        yield test_client
    # Remove the override so later tests see the real dependency again.
    app.dependency_overrides.clear()
def test_create_and_retrieve(client):
    """Round-trip: POST a new device, then GET it back by id."""
    payload = {"hostname": "sw-01", "ip": "10.50.1.1", "vlan": 10}
    created = client.post("/devices", json=payload)
    assert created.status_code == 201
    device_id = created.json()["id"]

    fetched = client.get(f"/devices/{device_id}")
    assert fetched.status_code == 200
    assert fetched.json()["hostname"] == "sw-01"
Coverage
pytest-cov measures which lines your tests actually exercise
pytest --cov=app --cov-report=term-missing # show uncovered lines
pytest --cov=app --cov-report=html # HTML report in htmlcov/
pytest --cov=app --cov-fail-under=80 # fail if coverage < 80%