Files
operating-room-monitor-server/pyproject.toml
Kevin 4c3f9a367b feat(voice-client): PySide6 desktop client and Windows build scripts
Add voice_confirmation_client (poll, TTS MP3 playback, mic WAV resolve),
PyInstaller spec, start/build helpers, and API unit tests.

Pending manual testing: end-to-end on OR workstations and packaged exe.

Made-with: Cursor
2026-04-27 09:52:10 +08:00

69 lines
1.7 KiB
TOML

[project]
name = "operation-room-monitor-server"
version = "0.1.0"
description = "Operation room monitor API server"
requires-python = ">=3.13"
# Runtime dependencies (PEP 508 specifiers), sorted alphabetically and
# indented one-per-line per PyPA pyproject.toml conventions.
dependencies = [
    "asyncpg>=0.31.0",
    "baidu-aip>=4.16.13",
    "chardet>=7.4.3",
    "fastapi>=0.136.0",
    "greenlet>=3.1.0",
    "loguru>=0.7.3",
    "minio>=7.2.15",
    "pillow>=12.2.0",
    "psycopg[binary]>=3.2.0",
    "pydantic-settings>=2.13.1",
    "python-multipart>=0.0.26",
    "pyyaml>=6.0.3",
    "rich>=15.0.0",
    "sqlalchemy>=2.0.49",
    "ultralytics>=8.4.40",
    "uvicorn[standard]>=0.44.0",
]
# Console entry points created on install.
# NOTE(review): these assume `main` and `voice_confirmation_client.__main__`
# expose a `main()` callable at the package root — confirm against the
# actual source layout before packaging.
[project.scripts]
operation-room-monitor-server = "main:main"
voice-confirmation-client = "voice_confirmation_client.__main__:main"
# Resolve torch/torchvision from the official PyTorch CPU wheel index so:
# - Linux Docker builds (including Docker Desktop on Mac) do not pull the
#   NVIDIA CUDA pip bundles.
# - Native macOS still resolves the correct macosx_* wheels from the same
#   index.
# NVIDIA servers need a separate CUDA torch install, or an override in a
# dedicated production Dockerfile.
[tool.uv]
index-strategy = "unsafe-best-match"

[[tool.uv.index]]
name = "pytorch-cpu"
url = "https://download.pytorch.org/whl/cpu"

[tool.uv.sources]
torch = { index = "pytorch-cpu" }
torchvision = { index = "pytorch-cpu" }
# PEP 735 dependency groups (installed with `uv sync --group <name>`).
[dependency-groups]
# Test/migration tooling; sorted alphabetically.
dev = [
    "aiosqlite>=0.21.0",
    "alembic>=1.14.0",
    "httpx>=0.28.0",
    "pytest>=8.3.0",
    "pytest-asyncio>=0.25.0",
]
# Desktop voice-confirmation client (PySide6 GUI + audio I/O).
voice-client = [
    "httpx>=0.28.0",
    "numpy>=2.0.0",
    "PySide6>=6.8.0",
    "sounddevice>=0.5.0",
]
# Build environment for the packaged client: reuse the voice-client group
# via a PEP 735 include so the two lists cannot drift apart, plus
# PyInstaller for producing the Windows exe.
voice-client-build = [
    { include-group = "voice-client" },
    "pyinstaller>=6.0.0",
]
# Pytest configuration: treat all async test functions as asyncio tests
# (pytest-asyncio auto mode) and collect only from the tests/ directory.
[tool.pytest.ini_options]
asyncio_mode = "auto"
testpaths = ["tests"]