bdd181425b
Docker Compose project with automated Playwright benchmarks comparing django-liveview 2.2.0 against Phoenix LiveView 1.0 across 6 scenarios.
Python · 136 lines · 4.5 KiB
"""
|
|
Benchmark runner - executes all scenarios against both frameworks and
|
|
writes a CSV + Markdown report to /app/results/.
|
|
"""

import os
import csv
import time
import statistics
from datetime import datetime
from pathlib import Path

from playwright.sync_api import sync_playwright

from performance_test import PerformanceTest
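
# PerformanceTest (defined in performance_test.py, not shown in this file) is
# assumed to expose roughly this contract -- a sketch inferred from how it is
# called below, not its actual implementation:
#
#   class PerformanceTest:
#       def __init__(self, page, base_url: str, label: str): ...
#       def run_scenario(self, key: str, iterations: int, warmup: int) -> list[dict]:
#           # one dict per timed iteration:
#           # {"ms": float, "ws_sent": int, "ws_recv": int}
#           ...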

DJANGO_URL = os.environ.get("DJANGO_URL", "http://localhost:8001")
PHOENIX_URL = os.environ.get("PHOENIX_URL", "http://localhost:8002")
ITERATIONS = int(os.environ.get("ITERATIONS", "10"))
WARMUP = int(os.environ.get("WARMUP", "2"))
RESULTS_DIR = Path(os.environ.get("RESULTS_DIR", "/app/results"))
RESULTS_DIR.mkdir(parents=True, exist_ok=True)
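
# Every knob above is environment-driven so docker-compose can retune a run
# without rebuilding the image, e.g. (hypothetical compose snippet):
#
#   benchmark:
#     environment:
#       ITERATIONS: "20"
#       WARMUP: "5"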

COMMON_SCENARIOS = [
    ("add_alert", "Add alert"),
    ("delete_alert", "Delete alert"),
    ("search_filter", "Search / filter"),
]

EDGE_SCENARIOS = [
    ("large_list_add", "Add to large list (500 items)"),
    ("rapid_fire", "Rapid fire (5 clicks burst)"),
    ("empty_search", "Empty search (no match)"),
]

ALL_SCENARIOS = COMMON_SCENARIOS + EDGE_SCENARIOS
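
# The keys must match whatever PerformanceTest.run_scenario dispatches on;
# the labels are only used for logging and the report. Edge scenarios are
# run with fewer iterations -- see run_all() below.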

FRAMEWORKS = [
    ("django", DJANGO_URL, "Django LiveView"),
    ("phoenix", PHOENIX_URL, "Phoenix LiveView"),
]


def wait_for_app(url: str, label: str, retries: int = 30):
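    """Poll `url` until it answers with any non-5xx status.

    Redirects and 404s count as "up" -- all we need is a server that is
    accepting requests before the benchmark starts.
    """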
    import requests

    for attempt in range(retries):
        try:
            r = requests.get(url, timeout=5)
            if r.status_code < 500:
                print(f" {label} is up ({r.status_code})")
                return
        except Exception:
            pass
        print(f" Waiting for {label}... ({attempt + 1}/{retries})")
        time.sleep(3)
    raise RuntimeError(f"{label} did not become ready at {url}")


def summarise(values: list[float]) -> dict:
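    """Reduce a list of per-iteration timings (ms) to summary statistics.

    Example: summarise([10.0, 12.0, 14.0]) returns
    {"avg": 12.0, "median": 12.0, "stdev": 2.0, "min": 10.0, "max": 14.0}.
    """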
    if not values:
        return {}
    return {
        "avg": round(statistics.mean(values), 2),
        "median": round(statistics.median(values), 2),
        "stdev": round(statistics.stdev(values), 2) if len(values) > 1 else 0.0,
        "min": round(min(values), 2),
        "max": round(max(values), 2),
    }


def run_all():
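    """Run every scenario against both frameworks, then write a timestamped
    CSV and Markdown report into RESULTS_DIR."""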
    ts = datetime.now().strftime("%Y%m%d_%H%M%S")
    csv_path = RESULTS_DIR / f"results_{ts}.csv"
    md_path = RESULTS_DIR / f"report_{ts}.md"

    # Wait for both apps
    print("\n=== Waiting for apps to be ready ===")
    for _, url, label in FRAMEWORKS:
        wait_for_app(url, label)

    all_rows: list[dict] = []

    with sync_playwright() as p:
        for fw_key, fw_url, fw_label in FRAMEWORKS:
            print(f"\n=== Benchmarking: {fw_label} ===")
            browser = p.chromium.launch(headless=True)

            for scenario_key, scenario_label in ALL_SCENARIOS:
                edge = scenario_key in [s[0] for s in EDGE_SCENARIOS]
                iters = max(ITERATIONS // 2, 3) if edge else ITERATIONS
                warmup = 1 if edge else WARMUP
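
                # Edge scenarios are heavier, so they get half the iterations
                # (never fewer than 3) and a single warmup pass to keep the
                # total runtime bounded.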

                print(f" [{scenario_label}] {iters} iterations, {warmup} warmup...")
                page = browser.new_page()
                try:
                    test = PerformanceTest(page, fw_url, fw_label)
                    raw = test.run_scenario(scenario_key, iters, warmup)
                    for idx, r in enumerate(raw):
                        all_rows.append({
                            "framework": fw_label,
                            "scenario_key": scenario_key,
                            "scenario": scenario_label,
                            "iteration": idx + 1,
                            "ms": r["ms"],
                            "ws_sent_b": r["ws_sent"],
                            "ws_recv_b": r["ws_recv"],
                        })
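
                    # ws_sent/ws_recv are presumably the WebSocket payload
                    # bytes PerformanceTest measured for the iteration; the
                    # _b suffix in the CSV column names marks the unit (bytes).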
                    ms_list = [r["ms"] for r in raw]
                    s = summarise(ms_list)
                    print(f" avg={s['avg']}ms median={s['median']}ms stdev={s['stdev']}ms")
                except Exception as exc:
                    print(f" ERROR: {exc}")
                finally:
                    page.close()

            browser.close()

    # Write CSV
    if all_rows:
        with open(csv_path, "w", newline="") as f:
            writer = csv.DictWriter(f, fieldnames=all_rows[0].keys())
            writer.writeheader()
            writer.writerows(all_rows)
        print(f"\nCSV saved: {csv_path}")

    # Write Markdown report
    _write_markdown(all_rows, md_path, ts)
    print(f"Report saved: {md_path}")


def _write_markdown(rows: list[dict], path: Path, ts: str):
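    """Render `rows` into a Markdown report via generate_report.build_markdown."""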
    from generate_report import build_markdown

    md = build_markdown(rows, ts)
    path.write_text(md)


if __name__ == "__main__":
    run_all()