# konduktor/benchmarks/bench_path_matcher.py
# Snapshot metadata: 2025-12-03 12:54:45 +03:00 — 216 lines, 7.2 KiB, Python
#!/usr/bin/env python3
"""
Benchmark script for path_matcher performance comparison.
Compares:
- Pure Python implementation
- Cython implementation (if available)
- Original MountedApp from asgi_mount.py
Usage:
python benchmarks/bench_path_matcher.py
"""
import statistics
import time
from typing import Callable, List, Tuple

from pyserve._path_matcher_py import (
    FastMountedPath as PyFastMountedPath,
    FastMountManager as PyFastMountManager,
    path_matches_prefix as py_path_matches_prefix,
)

# Probe for the optional compiled extension; fall back gracefully when the
# Cython module has not been built so the pure-Python path can still be timed.
try:
    from pyserve._path_matcher import (
        FastMountedPath as CyFastMountedPath,
        FastMountManager as CyFastMountManager,
        path_matches_prefix as cy_path_matches_prefix,
    )
    CYTHON_AVAILABLE = True
except ImportError:
    CYTHON_AVAILABLE = False
    print("Cython module not compiled. Run: python setup_cython.py build_ext --inplace\n")

from pyserve.asgi_mount import MountedApp
def benchmark(func: Callable, iterations: int = 100000) -> Tuple[float, float]:
    """Time ``func`` per call and return ``(mean_ns, stdev_ns)``.

    Runs a fixed 1000-call warmup first so first-call effects (attribute
    caches, lazy initialisation) do not skew the measurement. Each call is
    timed individually with ``time.perf_counter_ns``, so per-call timer
    overhead (tens of ns) is included in the reported numbers.

    Args:
        func: Zero-argument callable to measure.
        iterations: Number of timed calls; must be >= 1.

    Returns:
        Tuple of (mean, standard deviation) in nanoseconds. The stdev is
        0.0 when only one sample was taken (stdev needs >= 2 points).

    Raises:
        ValueError: If ``iterations`` is less than 1.
    """
    if iterations < 1:
        raise ValueError("iterations must be >= 1")
    # Warmup: not timed, just executed.
    for _ in range(1000):
        func()
    timer = time.perf_counter_ns  # hoist attribute lookup out of the hot loop
    times = []
    append = times.append
    for _ in range(iterations):
        start = timer()
        func()
        append(timer() - start)
    mean = statistics.mean(times)
    # Original code raised StatisticsError for a single sample; report 0.0 instead.
    stdev = statistics.stdev(times) if len(times) > 1 else 0.0
    return mean, stdev
def format_time(ns: float) -> str:
    """Render a duration given in nanoseconds with a human-friendly unit.

    Durations of 1 ms and above are shown in milliseconds, 1 µs and above
    in microseconds, and anything smaller in raw nanoseconds.
    """
    if ns >= 1_000_000:
        return f"{ns/1_000_000:.2f} ms"
    if ns >= 1000:
        return f"{ns/1000:.2f} µs"
    return f"{ns:.1f} ns"
def run_benchmarks():
    """Run all three path-matcher benchmarks and print a report to stdout.

    Benchmark 1 times single-path matching for the original MountedApp, the
    pure-Python FastMountedPath and (when built) the Cython version.
    Benchmark 2 times mount lookup through the manager across 10 mounts.
    Benchmark 3 compares a two-call match+modify sequence against the
    combined single-call helpers.
    """
    print("=" * 70)
    print("PATH MATCHER BENCHMARK")
    print("=" * 70)
    print()
    # Test paths
    mount_path = "/api/v1"
    test_paths = [
        "/api/v1/users/123/posts",  # Matching - long
        "/api/v1",                  # Matching - exact
        "/api/v2/users",            # Not matching - similar prefix
        "/other/path",              # Not matching - completely different
    ]
    iterations = 100000
    # =========================================================================
    # Benchmark 1: Single path matching
    # =========================================================================
    print("BENCHMARK 1: Single Path Matching")
    print("-" * 70)
    print(f" Mount path: {mount_path}")
    print(f" Iterations: {iterations:,}")
    print()
    # NOTE(review): `results` is populated below but never read afterwards —
    # looks like a leftover from a planned summary table; confirm before removing.
    results = {}
    # Original MountedApp
    original_mount = MountedApp(mount_path, app=None, name="test")  # type: ignore
    for test_path in test_paths:
        print(f" Test path: {test_path}")
        # Original
        # The lambdas below capture the loop variable, but benchmark() runs
        # synchronously inside each iteration, so late binding is harmless here.
        mean, std = benchmark(lambda: original_mount.matches(test_path), iterations)
        results[("Original", test_path)] = mean
        print(f" Original MountedApp: {format_time(mean):>12} ± {format_time(std)}")
        # Pure Python
        py_mount = PyFastMountedPath(mount_path)
        mean, std = benchmark(lambda: py_mount.matches(test_path), iterations)
        results[("Python", test_path)] = mean
        print(f" Pure Python: {format_time(mean):>12} ± {format_time(std)}")
        # Cython (if available)
        if CYTHON_AVAILABLE:
            cy_mount = CyFastMountedPath(mount_path)
            mean, std = benchmark(lambda: cy_mount.matches(test_path), iterations)
            results[("Cython", test_path)] = mean
            print(f" Cython: {format_time(mean):>12} ± {format_time(std)}")
        print()
    # =========================================================================
    # Benchmark 2: Mount Manager lookup
    # =========================================================================
    print()
    print("BENCHMARK 2: Mount Manager Lookup (10 mounts)")
    print("-" * 70)
    # Setup managers with 10 mounts
    mount_paths = [f"/api/v{i}" for i in range(10)]
    py_manager = PyFastMountManager()
    for p in mount_paths:
        py_manager.add_mount(PyFastMountedPath(p, name=p))
    if CYTHON_AVAILABLE:
        cy_manager = CyFastMountManager()
        for p in mount_paths:
            cy_manager.add_mount(CyFastMountedPath(p, name=p))
    test_lookups = [
        "/api/v5/users/123",  # Middle mount
        "/api/v0/items",      # First mount (longest)
        "/api/v9/data",       # Last mount
        "/other/not/found",   # No match
    ]
    for test_path in test_lookups:
        print(f" Lookup path: {test_path}")
        # Pure Python
        mean, std = benchmark(lambda: py_manager.get_mount(test_path), iterations)
        print(f" Pure Python: {format_time(mean):>12} ± {format_time(std)}")
        # Cython
        if CYTHON_AVAILABLE:
            mean, std = benchmark(lambda: cy_manager.get_mount(test_path), iterations)
            print(f" Cython: {format_time(mean):>12} ± {format_time(std)}")
        print()
    # =========================================================================
    # Benchmark 3: Combined match + modify
    # =========================================================================
    print()
    print("BENCHMARK 3: Combined Match + Modify Path")
    print("-" * 70)
    # Deferred imports: the combined helpers are only needed by this benchmark.
    from pyserve._path_matcher_py import match_and_modify_path as py_match_modify
    if CYTHON_AVAILABLE:
        from pyserve._path_matcher import match_and_modify_path as cy_match_modify
    test_path = "/api/v1/users/123/posts"
    print(f" Test path: {test_path}")
    print(f" Mount path: {mount_path}")
    print()
    # Original (separate calls)
    def original_match_modify():
        # Baseline: two separate calls (match, then modify) as the original
        # MountedApp API requires.
        if original_mount.matches(test_path):
            return original_mount.get_modified_path(test_path)
        return None
    mean, std = benchmark(original_match_modify, iterations)
    print(f" Original (2 calls): {format_time(mean):>12} ± {format_time(std)}")
    # Pure Python combined
    mean, std = benchmark(lambda: py_match_modify(test_path, mount_path), iterations)
    print(f" Pure Python (combined): {format_time(mean):>12} ± {format_time(std)}")
    # Cython combined
    if CYTHON_AVAILABLE:
        mean, std = benchmark(lambda: cy_match_modify(test_path, mount_path), iterations)
        print(f" Cython (combined): {format_time(mean):>12} ± {format_time(std)}")
    # =========================================================================
    # Summary
    # =========================================================================
    print()
    print("=" * 70)
    print("SUMMARY")
    print("=" * 70)
    if CYTHON_AVAILABLE:
        print("Cython module is available and was benchmarked")
    else:
        print("Cython module not available - only Pure Python was benchmarked")
        print(" To build Cython module:")
        print(" 1. Install Cython: pip install cython")
        print(" 2. Build: python setup_cython.py build_ext --inplace")
    print()
    print("The optimized path matcher provides:")
    print(" - Pre-computed path length and trailing slash")
    print(" - Boundary-aware prefix matching (prevents /api matching /api-v2)")
    print(" - Combined match+modify operation to reduce function calls")
    print(" - Longest-prefix-first ordering in MountManager")
# Script entry point: run the full benchmark suite when executed directly.
if __name__ == "__main__":
    run_benchmarks()