Files
operating-room-monitor-server/pyproject.toml
Kevin 8a4bad99d3 feat: 配置写死与 baked 模块,Alembic 建表,百度仅 BAIDU_*
- 新增 app/baked/algorithm|pipeline,非部署参数不再走 env;Settings 保留 DB/HTTP/RTSP/海康/百度/MinIO/Demo
- 移除 init_db_schema 与 reload 配置;main 仅 check_database;start*.sh 在 uvicorn 前执行 alembic upgrade head
- 依赖 psycopg[binary] 供 Alembic 同步 URL;alembic/env 注释与预发清单更新
- 撕段门控消费管线、各视频/语音/归档调用改为 baked
- 百度环境变量仅 BAIDU_APP_ID、BAIDU_API_KEY、BAIDU_SECRET_KEY 与 BAIDU_* 超时/ASR;人脸脚本与 baidu_speech 文案同步
- 全量单测与 .env.example 更新;.gitignore 忽略 refs/(本地权重/视频不入库)

Made-with: Cursor
2026-04-24 15:33:22 +08:00

55 lines
1.4 KiB
TOML

[project]
name = "operation-room-monitor-server"
version = "0.1.0"
description = "Operation room monitor API server"
requires-python = ">=3.13"
# PEP 508 specifiers, sorted alphabetically.
dependencies = [
    # Deploy start scripts run `alembic upgrade head` before uvicorn (see the
    # commit notes), so Alembic must be a runtime dependency, not dev-only.
    "alembic>=1.14.0",
    # Async driver for the app itself.
    "asyncpg>=0.31.0",
    "baidu-aip>=4.16.13",
    "chardet>=7.4.3",
    "fastapi>=0.136.0",
    "greenlet>=3.1.0",
    "loguru>=0.7.3",
    "minio>=7.2.15",
    "pillow>=12.2.0",
    # Sync driver used by Alembic's synchronous URL (commit notes).
    "psycopg[binary]>=3.2.0",
    "pydantic-settings>=2.13.1",
    "python-multipart>=0.0.26",
    "pyyaml>=6.0.3",
    "rich>=15.0.0",
    "sqlalchemy>=2.0.49",
    "ultralytics>=8.4.40",
    "uvicorn[standard]>=0.44.0",
]
[project.scripts]
operation-room-monitor-server = "main:main"

# Use PyTorch CPU wheels from the official index so:
# - Linux Docker builds (incl. Docker Desktop on Mac) do not install NVIDIA CUDA pip bundles.
# - Native macOS still resolves to the correct macosx_* wheels from the same index.
# For NVIDIA servers, use a separate CUDA torch install or override in a dedicated prod Dockerfile.
[tool.uv]
index-strategy = "unsafe-best-match"

# Extra package index serving the CPU-only torch wheels.
[[tool.uv.index]]
name = "pytorch-cpu"
url = "https://download.pytorch.org/whl/cpu"

# Pin torch/torchvision resolution to the CPU index above.
[tool.uv.sources]
torch = { index = "pytorch-cpu" }
torchvision = { index = "pytorch-cpu" }
# Development-only tools, sorted alphabetically.
[dependency-groups]
dev = [
    "aiosqlite>=0.21.0",
    "alembic>=1.14.0",
    "httpx>=0.28.0",
    "pytest>=8.3.0",
    "pytest-asyncio>=0.25.0",
]
[tool.pytest.ini_options]
# pytest-asyncio "auto" mode: every `async def` test runs on the plugin's
# event loop without needing an explicit @pytest.mark.asyncio marker.
asyncio_mode = "auto"
testpaths = ["tests"]