# bench.py
  1. """
  2. Radionica3D API Throughput Benchmark
  3. =====================================
  4. Measures: Order creation, status update, and deletion throughput.
  5. Usage:
  6. cd backend
  7. python scratch/bench.py
  8. Requires: pip install httpx
  9. """
  10. import asyncio
  11. import httpx
  12. import time
  13. import statistics
  14. import os
  15. BASE_URL = "http://localhost:8000"
  16. # ──────────────────────────────────────────────
  17. # CONFIG — fill in a valid admin token or leave
  18. # empty to auto-login (requires correct creds).
  19. # ──────────────────────────────────────────────
  20. ADMIN_EMAIL = "admin@radionica3d.com"
  21. ADMIN_PASSWORD = "admin123" # change if needed
  22. MATERIAL_ID = 1 # must exist in the DB
  23. CONCURRENCY = 10 # parallel requests per batch
  24. ITERATIONS = 30 # total operations per scenario
  25. # ──────────────────────────────────────────────
  26. async def get_admin_token(client: httpx.AsyncClient) -> str:
  27. resp = await client.post(
  28. f"{BASE_URL}/auth/login",
  29. json={"email": ADMIN_EMAIL, "password": ADMIN_PASSWORD},
  30. )
  31. resp.raise_for_status()
  32. token = resp.json().get("access_token")
  33. if not token:
  34. raise RuntimeError(f"No token in response: {resp.json()}")
  35. print("[OK] Logged in as admin")
  36. return token
  37. async def create_one_order(client: httpx.AsyncClient, token: str, idx: int) -> tuple[float, int]:
  38. """Returns (latency_ms, status_code)"""
  39. # Use admin-created order (no file slicing, no rate limit)
  40. form = {
  41. "first_name": ("", f"Bench{idx}"),
  42. "last_name": ("", "Test"),
  43. "phone": ("", "+38269000000"),
  44. "email": ("", f"bench_{idx}@test.local"),
  45. "shipping_address": ("", "Bench Street 1"),
  46. "material_id": ("", str(MATERIAL_ID)),
  47. "quantity": ("", "1"),
  48. "color_name": ("", "White"),
  49. "file_ids": ("", "[]"),
  50. "file_quantities": ("", "[]"),
  51. }
  52. t0 = time.monotonic()
  53. resp = await client.post(
  54. f"{BASE_URL}/orders",
  55. headers={"Authorization": f"Bearer {token}"},
  56. files=form,
  57. )
  58. latency = (time.monotonic() - t0) * 1000
  59. return latency, resp.status_code, resp.json().get("order_id") or resp.json().get("id")
  60. async def update_order_status(client: httpx.AsyncClient, token: str, order_id: int) -> tuple[float, int]:
  61. t0 = time.monotonic()
  62. resp = await client.patch(
  63. f"{BASE_URL}/orders/{order_id}",
  64. headers={"Authorization": f"Bearer {token}"},
  65. json={"status": "processing"},
  66. )
  67. latency = (time.monotonic() - t0) * 1000
  68. body = resp.text[:200]
  69. return latency, resp.status_code, body
  70. async def delete_order(client: httpx.AsyncClient, token: str, order_id: int) -> tuple[float, int]:
  71. t0 = time.monotonic()
  72. resp = await client.delete(
  73. f"{BASE_URL}/orders/{order_id}/admin",
  74. headers={"Authorization": f"Bearer {token}"},
  75. )
  76. latency = (time.monotonic() - t0) * 1000
  77. return latency, resp.status_code
  78. def print_stats(label: str, latencies: list[float], duration: float, count: int):
  79. ok = len(latencies)
  80. rps = ok / duration
  81. rpm = rps * 60
  82. print(f"\n-- {label} --")
  83. print(f" Requests sent : {count}")
  84. print(f" Succeeded (2xx) : {ok}")
  85. print(f" Failed : {count - ok}")
  86. if latencies:
  87. print(f" Avg latency : {statistics.mean(latencies):.0f} ms")
  88. print(f" P50 : {statistics.median(latencies):.0f} ms")
  89. print(f" P95 : {sorted(latencies)[int(len(latencies)*0.95)]:.0f} ms")
  90. print(f" Max : {max(latencies):.0f} ms")
  91. print(f" Total time : {duration:.2f}s")
  92. print(f" Throughput : {rps:.1f} req/s ~ {rpm:.0f} req/min")
  93. async def run_concurrent(tasks):
  94. """Run tasks in batches of CONCURRENCY"""
  95. results = []
  96. for i in range(0, len(tasks), CONCURRENCY):
  97. batch = tasks[i:i + CONCURRENCY]
  98. batch_results = await asyncio.gather(*batch, return_exceptions=True)
  99. results.extend(batch_results)
  100. return results
  101. async def bench_create(client, token) -> list[int]:
  102. """Benchmark creation, return list of created order IDs."""
  103. tasks = [create_one_order(client, token, i) for i in range(ITERATIONS)]
  104. print(f"\n>> Creating {ITERATIONS} orders ({CONCURRENCY} concurrent)...")
  105. t_start = time.monotonic()
  106. raw = await run_concurrent(tasks)
  107. duration = time.monotonic() - t_start
  108. latencies, order_ids = [], []
  109. for r in raw:
  110. if isinstance(r, Exception):
  111. print(f" [error] {r}")
  112. continue
  113. lat, code, order_id = r
  114. if 200 <= code < 300 and order_id:
  115. latencies.append(lat)
  116. order_ids.append(order_id)
  117. else:
  118. print(f" [fail] status={code}")
  119. print_stats("CREATE", latencies, duration, ITERATIONS)
  120. return order_ids
  121. async def bench_update(client, token, order_ids: list[int]):
  122. """Benchmark status update for all created orders."""
  123. tasks = [update_order_status(client, token, oid) for oid in order_ids]
  124. print(f"\n>> Updating {len(order_ids)} orders ({CONCURRENCY} concurrent)...")
  125. t_start = time.monotonic()
  126. raw = await run_concurrent(tasks)
  127. duration = time.monotonic() - t_start
  128. latencies = []
  129. for r in raw:
  130. if isinstance(r, Exception):
  131. print(f" [error] {r}")
  132. continue
  133. lat, code, body = r
  134. if 200 <= code < 300:
  135. latencies.append(lat)
  136. else:
  137. print(f" [fail] status={code} body={body}")
  138. print_stats("UPDATE STATUS", latencies, duration, len(order_ids))
  139. async def bench_delete(client, token, order_ids: list[int]):
  140. """Benchmark deletion for all created orders."""
  141. tasks = [delete_order(client, token, oid) for oid in order_ids]
  142. print(f"\n>> Deleting {len(order_ids)} orders ({CONCURRENCY} concurrent)...")
  143. t_start = time.monotonic()
  144. raw = await run_concurrent(tasks)
  145. duration = time.monotonic() - t_start
  146. latencies = []
  147. for r in raw:
  148. if isinstance(r, Exception):
  149. continue
  150. lat, code = r
  151. if 200 <= code < 300:
  152. latencies.append(lat)
  153. print_stats("DELETE", latencies, duration, len(order_ids))
  154. async def main():
  155. print("=" * 50)
  156. print(" Radionica3D API Throughput Benchmark")
  157. print(f" Target : {BASE_URL}")
  158. print(f" Concurrency : {CONCURRENCY} Iterations : {ITERATIONS}")
  159. print("=" * 50)
  160. limits = httpx.Limits(max_connections=CONCURRENCY + 5, max_keepalive_connections=CONCURRENCY)
  161. async with httpx.AsyncClient(timeout=30.0, limits=limits) as client:
  162. try:
  163. token = await get_admin_token(client)
  164. except Exception as e:
  165. print(f"Login failed: {e}")
  166. return
  167. # 1. Create
  168. order_ids = await bench_create(client, token)
  169. if not order_ids:
  170. print("\nNo orders created, stopping.")
  171. return
  172. # 2. Update
  173. await bench_update(client, token, order_ids)
  174. # 3. Delete
  175. await bench_delete(client, token, order_ids)
  176. print("\n[OK] Benchmark complete. Check server logs for any errors.")
  177. if __name__ == "__main__":
  178. asyncio.run(main())