Files
operating-room-monitor-server/pyproject.toml
Kevin 0c05463617 feat: 语音确认、联调与运维增强
- 语音:序数解析(第一个/第二个等)、解析失败计数与 API detail.retry_remaining;
  百度 ASR 固定 dev_pid 为普通话;SurgeryPipelineError 支持 extra 并入 HTTP detail。
- Demo:demo 路由与假 RTSP、客户端 index 与 README;BackendResolver 与配置调整。
- 可观测:消耗 TSV 日志、语音文件日志、终端 Markdown 辅助;相关测试与依赖更新。
- 注意:.env 仍被 gitignore,本地密钥不会进入本提交。

Made-with: Cursor
2026-04-23 14:24:20 +08:00

53 lines
1.3 KiB
TOML

[project]
name = "operation-room-monitor-server"
version = "0.1.0"
description = "Operation room monitor API server"
requires-python = ">=3.13"
# Runtime dependencies as PEP 508 specifiers.
# Keep this list alphabetically sorted, one entry per line.
dependencies = [
    "asyncpg>=0.31.0",
    "baidu-aip>=4.16.13",
    "chardet>=7.4.3",
    "fastapi>=0.136.0",
    "greenlet>=3.1.0",
    "loguru>=0.7.3",
    "minio>=7.2.15",
    "openpyxl>=3.1.5",
    "pillow>=12.2.0",
    "pydantic-settings>=2.13.1",
    "python-multipart>=0.0.26",
    "rich>=15.0.0",
    "sqlalchemy>=2.0.49",
    "ultralytics>=8.4.40",
    "uvicorn[standard]>=0.44.0",
]
# Console entry point installed with the package: "module:function",
# i.e. run main() from the top-level `main` module.
[project.scripts]
operation-room-monitor-server = "main:main"
# Use PyTorch CPU wheels from the official index so:
# - Linux Docker builds (incl. Docker Desktop on Mac) do not install NVIDIA CUDA pip bundles.
# - Native macOS still resolves to the correct macosx_* wheels from the same index.
# For NVIDIA servers, use a separate CUDA torch install or override in a dedicated prod Dockerfile.
[tool.uv]
index-strategy = "unsafe-best-match"

# Extra package index dedicated to CPU-only torch builds.
[[tool.uv.index]]
name = "pytorch-cpu"
url = "https://download.pytorch.org/whl/cpu"

# Pin torch/torchvision resolution to the CPU index declared above.
[tool.uv.sources]
torch = { index = "pytorch-cpu" }
torchvision = { index = "pytorch-cpu" }
# Development-only dependency group (PEP 735); not installed with the package.
# Keep this list alphabetically sorted, one entry per line.
[dependency-groups]
dev = [
    "aiosqlite>=0.21.0",
    "httpx>=0.28.0",
    "pytest>=8.3.0",
    "pytest-asyncio>=0.25.0",
]
[tool.pytest.ini_options]
# pytest-asyncio "auto" mode: async test functions are collected and run
# as asyncio tests without a per-test @pytest.mark.asyncio marker.
asyncio_mode = "auto"
testpaths = ["tests"]