# Commit notes (preserved from paste; literal \n escapes expanded):
# - 新增 Alembic 初始迁移、领域明细模型及归档持久化与重试链路
# - 拆分视频会话注册表、分类处理、推理时间窗聚合与流处理
# - 消耗日志:TSV/Markdown 含 top2/top3;item_id 优先产品编码;待确认记「待确认」行,语音确认后落正式行并更新汇总
# - 待确认时内存/DB 明细为占位行,确认后替换;拒绝时移除占位
# - 分类 probs 先 detach/cpu 再转 NumPy,修复 MPS/CUDA 上推理被静默跳过
# - 补充集成测试、归档与设备张量等单测 Made-with: Cursor
# (paste metadata: 54 lines, 1.3 KiB, TOML)
# PEP 621 project metadata for the operation-room monitor API server.
[project]
name = "operation-room-monitor-server"
version = "0.1.0"
description = "Operation room monitor API server"
requires-python = ">=3.13"
# Runtime dependencies (PEP 508 specifiers), sorted alphabetically.
dependencies = [
    "asyncpg>=0.31.0",
    "baidu-aip>=4.16.13",
    "chardet>=7.4.3",
    "fastapi>=0.136.0",
    "greenlet>=3.1.0",
    "loguru>=0.7.3",
    "minio>=7.2.15",
    "openpyxl>=3.1.5",
    "pillow>=12.2.0",
    "pydantic-settings>=2.13.1",
    "python-multipart>=0.0.26",
    "rich>=15.0.0",
    "sqlalchemy>=2.0.49",
    "ultralytics>=8.4.40",
    "uvicorn[standard]>=0.44.0",
]

# Console entry point: installs a `operation-room-monitor-server` command
# that invokes main.main().
[project.scripts]
operation-room-monitor-server = "main:main"

# Use PyTorch CPU wheels from the official index so:
# - Linux Docker builds (incl. Docker Desktop on Mac) do not install NVIDIA CUDA pip bundles.
# - Native macOS still resolves to the correct macosx_* wheels from the same index.
# For NVIDIA servers, use a separate CUDA torch install or override in a dedicated prod Dockerfile.
[tool.uv]
# Allow picking the best candidate across all configured indexes, so torch
# resolves from the pytorch-cpu index while everything else comes from PyPI.
index-strategy = "unsafe-best-match"

# Extra package index serving CPU-only PyTorch wheels.
[[tool.uv.index]]
name = "pytorch-cpu"
url = "https://download.pytorch.org/whl/cpu"

# Pin torch/torchvision resolution to the CPU wheel index defined above.
[tool.uv.sources]
torch = { index = "pytorch-cpu" }
torchvision = { index = "pytorch-cpu" }

# PEP 735 dependency groups: dev-only tooling (tests, migrations), sorted
# alphabetically. Not installed for production deployments.
[dependency-groups]
dev = [
    "aiosqlite>=0.21.0",
    "alembic>=1.14.0",
    "httpx>=0.28.0",
    "pytest>=8.3.0",
    "pytest-asyncio>=0.25.0",
]

[tool.pytest.ini_options]
# "auto" lets pytest-asyncio collect async tests without per-test markers.
asyncio_mode = "auto"
testpaths = ["tests"]