diff --git a/.env.example b/.env.example index d18661b..ef8e705 100644 --- a/.env.example +++ b/.env.example @@ -19,9 +19,13 @@ POSTGRES_PORT=35432 # --- Video:RTSP 与按路后端(须与客户端 camera_ids 一致)--- # VIDEO_DEFAULT_BACKEND=rtsp # VIDEO_CAMERA_BACKEND_OVERRIDES_JSON={"or-cam-01":"rtsp","or-cam-02":"hikvision_sdk"} -# VIDEO_RTSP_URLS_JSON_FILE=app/resources/camera_rtsp_urls.sample.json -# VIDEO_RTSP_URLS_JSON={"or-cam-01":"rtsp://..."} +# 站点 JSON:术间↔摄像头↔语音终端只在这里维护(voice_or_room_bindings);须同时含 video_rtsp_urls;可为 []。 +# 见 app/resources/or_site_config.sample.json +# OR_SITE_CONFIG_JSON_FILE=app/resources/or_site_config.sample.json # VIDEO_RTSP_URL_TEMPLATE=rtsp://user:pass@host:554/path/{camera_id} +# +# 语音桌面客户端的「本机是哪一个 voice_terminal_id」不在此文件配置:在客户端界面填写, +# 或在该客户端运行环境的 shell / systemd 里设 VOICE_TERMINAL_ID(与 JSON 中对应值一致即可)。 # --- 海康 SDK(Linux x86_64;二进制勿提交仓库)--- # HIKVISION_LIB_DIR=/opt/hikvision/lib diff --git a/app/api.py b/app/api.py index 3fd7d98..a4cbc8a 100644 --- a/app/api.py +++ b/app/api.py @@ -2,7 +2,17 @@ import asyncio from collections.abc import Awaitable, Callable from typing import Annotated -from fastapi import APIRouter, Depends, File, HTTPException, Path, UploadFile, status +from fastapi import ( + APIRouter, + Depends, + File, + HTTPException, + Path, + Query, + UploadFile, + WebSocket, + status, +) from fastapi.responses import JSONResponse from loguru import logger from sqlalchemy.exc import SQLAlchemyError @@ -10,7 +20,7 @@ from sqlalchemy.exc import SQLAlchemyError from app.baked import pipeline as bp from app.config import settings from app.database import check_database -from app.dependencies import get_surgery_pipeline +from app.dependencies import get_surgery_pipeline, get_voice_terminal_hub from app.schemas import ( HealthResponse, SurgeryApiResponse, @@ -20,9 +30,14 @@ from app.schemas import ( SurgeryPendingConfirmationResponse, SurgeryResultResponse, SurgeryStartRequest, + VoiceTerminalAssignmentResponse, build_consumption_summary, ) from 
app.services.surgery_pipeline import SurgeryPipeline +from app.services.voice_terminal_hub import ( + VoiceTerminalHub, + assign_voice_terminal_after_recording_started, +) from app.surgery_errors import SurgeryPipelineError router = APIRouter() @@ -123,13 +138,13 @@ async def health() -> HealthResponse | JSONResponse: description="供 demo 页探测:是否启用 orchestrator、RTSP 文件配置等;此路由始终存在,不依赖 DEMO_ORCHESTRATOR_ENABLED。", ) async def demo_orchestrator_status() -> dict: - f = (settings.video_rtsp_urls_json_file or "").strip() + f = (settings.or_site_config_json_file or "").strip() return { "orchestrator_enabled": bool(settings.demo_orchestrator_enabled), "orchestrate_method": "POST", "orchestrate_path": "/internal/demo/orchestrate-and-start", - "video_rtsp_urls_json_file_set": bool(f), - "video_rtsp_urls_json_file": f or None, + "or_site_config_json_file_set": bool(f), + "or_site_config_json_file": f or None, "orchestrator_rtsp_port": settings.demo_orchestrator_rtsp_port, "orchestrator_rtsp_json_host": settings.demo_orchestrator_rtsp_json_host, } @@ -157,6 +172,7 @@ async def demo_orchestrator_status() -> dict: async def start_surgery( payload: SurgeryStartRequest, pipeline: Annotated[SurgeryPipeline, Depends(get_surgery_pipeline)], + voice_hub: Annotated[VoiceTerminalHub, Depends(get_voice_terminal_hub)], ) -> SurgeryApiResponse: logger.info( "Start surgery: surgery_id={}, cameras={}, candidates={}", @@ -180,6 +196,14 @@ async def start_surgery( ) except SurgeryPipelineError as exc: _raise_surgery_pipeline_http(exc, payload.surgery_id) + + await assign_voice_terminal_after_recording_started( + voice_hub, + surgery_id=payload.surgery_id, + camera_ids=list(payload.camera_ids), + set_voice_terminal_id=pipeline.set_voice_terminal_id, + ) + return SurgeryApiResponse( surgery_id=payload.surgery_id, status="accepted", @@ -209,11 +233,14 @@ async def start_surgery( async def end_surgery( payload: SurgeryEndRequest, pipeline: Annotated[SurgeryPipeline, Depends(get_surgery_pipeline)], + 
voice_hub: Annotated[VoiceTerminalHub, Depends(get_voice_terminal_hub)], ) -> SurgeryApiResponse: logger.info("End surgery: surgery_id={}", payload.surgery_id) + voice_terminal_id: str | None = None try: async def _stop() -> None: - await pipeline.stop_recording(payload.surgery_id) + nonlocal voice_terminal_id + voice_terminal_id = await pipeline.stop_recording(payload.surgery_id) await _call_recording_with_retries( _stop, @@ -223,6 +250,10 @@ async def end_surgery( ) except SurgeryPipelineError as exc: _raise_surgery_pipeline_http(exc, payload.surgery_id) + + if voice_terminal_id: + await voice_hub.notify_end(voice_terminal_id, payload.surgery_id) + return SurgeryApiResponse( surgery_id=payload.surgery_id, status="accepted", @@ -230,6 +261,33 @@ async def end_surgery( ) +@router.get( + "/client/voice-terminals/{terminal_id}/assignment", + response_model=VoiceTerminalAssignmentResponse, + tags=["client"], + summary="查询语音终端当前指派的手术", + description="供桌面客户端在 WebSocket 不可用时的轮询兜底;与 WS 推送的 assignment 状态一致。", +) +async def get_voice_terminal_assignment( + terminal_id: Annotated[str, Path(min_length=1, max_length=256)], + hub: Annotated[VoiceTerminalHub, Depends(get_voice_terminal_hub)], +) -> VoiceTerminalAssignmentResponse: + tid = terminal_id.strip() + return VoiceTerminalAssignmentResponse( + voice_terminal_id=tid, + active_surgery_id=hub.get_assignment(tid), + ) + + +@router.websocket("/client/voice-terminals/ws") +async def voice_terminal_websocket( + websocket: WebSocket, + terminal_id: Annotated[str, Query(..., min_length=1, max_length=256)], +) -> None: + container = websocket.app.state.container + await container.voice_terminal_hub.handle_websocket(websocket, terminal_id) + + @router.get( "/client/surgeries/{surgery_id}/result", response_model=SurgeryResultResponse, diff --git a/app/baked/algorithm.py b/app/baked/algorithm.py index eea5f71..f26272c 100644 --- a/app/baked/algorithm.py +++ b/app/baked/algorithm.py @@ -15,8 +15,9 @@ def 
default_consumable_classifier_labels_yaml_path() -> str: return str(_PACKAGE_DIR / "resources" / "consumable_classifier_labels.yaml") -def default_camera_rtsp_urls_sample_path() -> str: - return str(_PACKAGE_DIR / "resources" / "camera_rtsp_urls.sample.json") +def default_or_site_config_sample_path() -> str: + """站点配置示例:video_rtsp_urls + voice_or_room_bindings。""" + return str(_PACKAGE_DIR / "resources" / "or_site_config.sample.json") # --- 耗材分类(YOLO-cls)--- diff --git a/app/config.py b/app/config.py index 2cdf7eb..b67fddd 100644 --- a/app/config.py +++ b/app/config.py @@ -1,4 +1,5 @@ -import json +from __future__ import annotations + from pathlib import Path from urllib.parse import quote_plus from typing import Any, Literal @@ -7,6 +8,7 @@ from pydantic import Field from pydantic_settings import BaseSettings, SettingsConfigDict from app.baked import algorithm as baked_algorithm +from app.or_site_config import OrSiteConfig class _SettingsGroup: @@ -39,8 +41,7 @@ class _VideoGroup(_SettingsGroup): "video_default_backend", "video_camera_backend_overrides_json", "video_rtsp_url_template", - "video_rtsp_urls_json", - "video_rtsp_urls_json_file", + "or_site_config_json_file", ) @@ -135,8 +136,8 @@ class Settings(BaseSettings): video_default_backend: Literal["rtsp", "hikvision_sdk", "auto"] = "rtsp" video_camera_backend_overrides_json: str = "" video_rtsp_url_template: str = "" - video_rtsp_urls_json: str = "" - video_rtsp_urls_json_file: str = "" + #: 手术室站点配置(UTF-8 JSON):须含 video_rtsp_urls 与 voice_or_room_bindings,见 or_site_config.sample.json + or_site_config_json_file: str = "" hikvision_lib_dir: str = "/opt/hikvision/lib" hikvision_sdk_enabled: bool = False @@ -236,45 +237,27 @@ class Settings(BaseSettings): and self.minio_bucket.strip() ) - @staticmethod - def _parse_rtsp_urls_object(raw: str) -> dict[str, str]: - raw = (raw or "").strip() - if not raw: - return {} - try: - data: Any = json.loads(raw) - except json.JSONDecodeError as exc: - raise 
ValueError(f"Invalid VIDEO_RTSP_URLS_JSON: {exc}") from exc - if not isinstance(data, dict): - raise ValueError("VIDEO_RTSP_URLS_JSON must be a JSON object") - return {str(k): str(v) for k, v in data.items()} + def load_or_site_config(self) -> OrSiteConfig | None: + """解析 ``or_site_config_json_file``;未配置路径时返回 ``None``。""" + from app.or_site_config import load_or_site_config_from_path + + path_raw = (self.or_site_config_json_file or "").strip() + if not path_raw: + return None + path = Path(path_raw).expanduser() + if not path.is_file(): + raise ValueError(f"OR_SITE_CONFIG_JSON_FILE is set but file not found: {path}") + return load_or_site_config_from_path(path) def video_rtsp_url_map(self) -> dict[str, str]: - merged: dict[str, str] = {} - path_raw = (self.video_rtsp_urls_json_file or "").strip() - if path_raw: - path = Path(path_raw).expanduser() - if not path.is_file(): - raise ValueError( - f"VIDEO_RTSP_URLS_JSON_FILE is set but file not found: {path}" - ) - try: - file_obj: Any = json.loads(path.read_text(encoding="utf-8")) - except json.JSONDecodeError as exc: - raise ValueError( - f"Invalid JSON in VIDEO_RTSP_URLS_JSON_FILE {path}: {exc}" - ) from exc - if not isinstance(file_obj, dict): - raise ValueError( - f"VIDEO_RTSP_URLS_JSON_FILE must contain a JSON object: {path}" - ) - merged = {str(k): str(v) for k, v in file_obj.items()} - merged.update(self._parse_rtsp_urls_object(self.video_rtsp_urls_json)) - return merged + cfg = self.load_or_site_config() + if cfg is None: + return {} + return dict(cfg.video_rtsp_urls) @property - def camera_rtsp_urls_sample_path(self) -> str: - return baked_algorithm.default_camera_rtsp_urls_sample_path() + def or_site_config_sample_path(self) -> str: + return baked_algorithm.default_or_site_config_sample_path() @property def video(self) -> _VideoGroup: diff --git a/app/dependencies.py b/app/dependencies.py index ccfc3d9..9628405 100644 --- a/app/dependencies.py +++ b/app/dependencies.py @@ -25,6 +25,7 @@ from 
app.services.surgery_pipeline import SurgeryPipeline from app.services.video.hikvision_runtime import HikvisionRuntime from app.services.video.session_manager import CameraSessionManager from app.services.voice_resolution import VoiceConfirmationService +from app.services.voice_terminal_hub import VoiceTerminalHub @dataclass @@ -42,6 +43,7 @@ class AppContainer: tear_segment_model_bundle: TearGatedSegmentModelBundle | None voice_confirmation_service: VoiceConfirmationService surgery_pipeline: SurgeryPipeline + voice_terminal_hub: VoiceTerminalHub async def start(self) -> None: await self.camera_session_manager.start_archive_retry_loop() @@ -96,6 +98,7 @@ def build_container( voice_confirmation=voice, session_factory=sf, ) + voice_hub = VoiceTerminalHub(s) return AppContainer( settings=s, consumable_vision_algorithm_service=vision, @@ -108,6 +111,7 @@ def build_container( tear_segment_model_bundle=tear_bundle, voice_confirmation_service=voice, surgery_pipeline=pipeline, + voice_terminal_hub=voice_hub, ) @@ -140,3 +144,7 @@ def get_surgery_result_repository(request: Request) -> SurgeryResultRepository: def get_voice_confirmation_service(request: Request) -> VoiceConfirmationService: return get_container(request).voice_confirmation_service + + +def get_voice_terminal_hub(request: Request) -> VoiceTerminalHub: + return get_container(request).voice_terminal_hub diff --git a/app/or_site_config.py b/app/or_site_config.py new file mode 100644 index 0000000..fbb95ae --- /dev/null +++ b/app/or_site_config.py @@ -0,0 +1,103 @@ +"""手术室站点配置:单一 JSON 文件,严格结构,无历史格式分支。""" + +from __future__ import annotations + +import json +from dataclasses import dataclass +from pathlib import Path +from typing import Any + +from loguru import logger + +from app.services.voice_terminal_binding import VoiceTerminalBindingIndex + +_ALLOWED_TOP_LEVEL = frozenset({"video_rtsp_urls", "voice_or_room_bindings"}) + + +@dataclass(frozen=True) +class OrSiteConfig: + """根对象须含 ``video_rtsp_urls`` 与 
``voice_or_room_bindings`` 两个键。""" + + video_rtsp_urls: dict[str, str] + voice_bindings: VoiceTerminalBindingIndex + + +def parse_or_site_config_object(data: Any, *, source: str | Path = "") -> OrSiteConfig: + label = str(source) if source else "JSON" + if not isinstance(data, dict): + raise ValueError(f"{label}: OR site config must be a JSON object") + extra = set(data.keys()) - _ALLOWED_TOP_LEVEL + if extra: + raise ValueError( + f"{label}: unknown top-level keys {sorted(extra)}; " + f"allowed: {sorted(_ALLOWED_TOP_LEVEL)}" + ) + if "video_rtsp_urls" not in data or "voice_or_room_bindings" not in data: + raise ValueError( + f"{label}: must include video_rtsp_urls and voice_or_room_bindings" + ) + urls = data["video_rtsp_urls"] + if not isinstance(urls, dict): + raise ValueError(f"{label}: video_rtsp_urls must be a JSON object") + raw_bindings = data["voice_or_room_bindings"] + if not isinstance(raw_bindings, list): + raise ValueError(f"{label}: voice_or_room_bindings must be a JSON array") + for k, v in urls.items(): + if not isinstance(v, str): + raise ValueError( + f"{label}: video_rtsp_urls[{k!r}] must be a string (RTSP URL)" + ) + idx = VoiceTerminalBindingIndex.from_binding_list(raw_bindings) + if idx is None: + raise ValueError(f"{label}: invalid voice_or_room_bindings content") + return OrSiteConfig( + video_rtsp_urls={str(k): str(v) for k, v in urls.items()}, + voice_bindings=idx, + ) + + +def load_or_site_config_from_path(path: Path) -> OrSiteConfig: + p = path.expanduser() + try: + raw_text = p.read_text(encoding="utf-8") + except OSError as exc: + raise ValueError(f"Cannot read OR site config {p}: {exc}") from exc + try: + data: Any = json.loads(raw_text) + except json.JSONDecodeError as exc: + raise ValueError(f"Invalid JSON in OR site config {p}: {exc}") from exc + return parse_or_site_config_object(data, source=p) + + +def merge_video_rtsp_urls_into_file( + path: Path, + url_map: dict[str, str], + *, + replace_host: str, +) -> None: + """写入/更新站点配置中的 
``video_rtsp_urls``,保留 ``voice_or_room_bindings``。""" + if replace_host in ("", "127.0.0.1"): + out_urls = dict(url_map) + else: + out_urls = { + k: v.replace("127.0.0.1", replace_host, 1) + for k, v in url_map.items() + } + path = path.expanduser() + path.parent.mkdir(parents=True, exist_ok=True) + if path.is_file(): + raw_text = path.read_text(encoding="utf-8") + data: Any = json.loads(raw_text) + parse_or_site_config_object(data, source=path) + bindings_list = data["voice_or_room_bindings"] + else: + bindings_list = [] + doc = { + "video_rtsp_urls": out_urls, + "voice_or_room_bindings": bindings_list, + } + text = json.dumps(doc, ensure_ascii=False, indent=2, sort_keys=True) + "\n" + temp = path.with_name(path.name + ".tmp") + temp.write_text(text, encoding="utf-8") + temp.replace(path) + logger.info("Updated video_rtsp_urls in OR site config {}", path) diff --git a/app/resources/camera_rtsp_urls.sample.json b/app/resources/camera_rtsp_urls.sample.json deleted file mode 100644 index c13e8ae..0000000 --- a/app/resources/camera_rtsp_urls.sample.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "or-cam-01": "rtsp://admin:ChangeMe@192.168.1.101:554/Streaming/Channels/101", - "or-cam-02": "rtsp://admin:ChangeMe@192.168.1.102:554/Streaming/Channels/101" -} diff --git a/app/resources/or_site_config.sample.json b/app/resources/or_site_config.sample.json new file mode 100644 index 0000000..b5220e5 --- /dev/null +++ b/app/resources/or_site_config.sample.json @@ -0,0 +1,15 @@ +{ + "video_rtsp_urls": { + "or-cam-01": "rtsp://127.0.0.1:18554/demo1" + }, + "voice_or_room_bindings": [ + { + "camera_ids": [ + "or-cam-01", + "or-cam-02" + ], + "or_room_id": "OR-DEMO", + "voice_terminal_id": "desktop-1" + } + ] +} diff --git a/app/routers/demo_orch.py b/app/routers/demo_orch.py index 15da6e2..d0a3607 100644 --- a/app/routers/demo_orch.py +++ b/app/routers/demo_orch.py @@ -13,10 +13,15 @@ from fastapi import APIRouter, Depends, File, Form, HTTPException, UploadFile, s from loguru import 
logger from app.config import settings -from app.dependencies import get_surgery_pipeline +from app.dependencies import get_surgery_pipeline, get_voice_terminal_hub from app.schemas import SurgeryApiResponse, SurgeryStartRequest -from app.services.synthetic_rtsp import StreamSpec, SyntheticRtspManager, write_rtsp_url_json_file +from app.or_site_config import merge_video_rtsp_urls_into_file +from app.services.synthetic_rtsp import StreamSpec, SyntheticRtspManager from app.services.surgery_pipeline import SurgeryPipeline +from app.services.voice_terminal_hub import ( + VoiceTerminalHub, + assign_voice_terminal_after_recording_started, +) from app.surgery_errors import SurgeryPipelineError router = APIRouter(prefix="/internal/demo", tags=["demo"]) @@ -39,7 +44,8 @@ def _orchestrate_write_rtsp_host() -> str: summary="一键联调:上传 1–4 路视频并开录", description=( "仅当 DEMO_ORCHESTRATOR_ENABLED=true。保存一路或多路视频、启动 MediaMTX+ffmpeg、" - "将 RTSP 映射写入 VIDEO_RTSP_URLS_JSON_FILE,再执行与 /client/surgeries/start 相同的开录逻辑。" + "将 RTSP 映射合并写入 OR_SITE_CONFIG_JSON_FILE 的 video_rtsp_urls,再执行与 /client/surgeries/start 相同的开录逻辑" + "(含按 voice_or_room_bindings 解析并 WebSocket 推送语音终端指派)。" ), ) async def orchestrate_and_start( @@ -58,6 +64,7 @@ async def orchestrate_and_start( rtsp_path_4: Annotated[str, Form()] = "demo4", candidate_consumables_json: Annotated[str, Form()] = "[]", pipeline: SurgeryPipeline = Depends(get_surgery_pipeline), + voice_hub: VoiceTerminalHub = Depends(get_voice_terminal_hub), ) -> SurgeryApiResponse: logger.info( "demo orchestrate-and-start: surgery_id={} cameras={} rpaths={}", @@ -70,12 +77,13 @@ async def orchestrate_and_start( status_code=status.HTTP_404_NOT_FOUND, detail="Demo orchestrator disabled (set DEMO_ORCHESTRATOR_ENABLED=true).", ) - path_raw = (settings.video_rtsp_urls_json_file or "").strip() + path_raw = (settings.or_site_config_json_file or "").strip() if not path_raw: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=( - "VIDEO_RTSP_URLS_JSON_FILE 
must be set to a writable path; " + "OR_SITE_CONFIG_JSON_FILE must be set to a writable path " + "(strict site JSON with video_rtsp_urls + voice_or_room_bindings); " "in Docker, bind-mount a host file to this path." ), ) @@ -195,7 +203,7 @@ async def orchestrate_and_start( try: def _write() -> None: - write_rtsp_url_json_file( + merge_video_rtsp_urls_into_file( json_path, url_map_host, replace_host=host_for_json, @@ -224,6 +232,13 @@ async def orchestrate_and_start( detail={"code": exc.code, "message": exc.message, "surgery_id": body.surgery_id}, ) from exc + await assign_voice_terminal_after_recording_started( + voice_hub, + surgery_id=body.surgery_id, + camera_ids=list(body.camera_ids), + set_voice_terminal_id=pipeline.set_voice_terminal_id, + ) + return SurgeryApiResponse( surgery_id=body.surgery_id, status="accepted", diff --git a/app/schemas.py b/app/schemas.py index cd97d73..72c8e17 100644 --- a/app/schemas.py +++ b/app/schemas.py @@ -61,6 +61,13 @@ class SurgeryApiResponse(BaseModel): message: str = Field(description="返回说明。") +class VoiceTerminalAssignmentResponse(BaseModel): + """语音桌面终端当前被指派的手术(HTTP 轮询兜底,与 WebSocket 推送一致)。""" + + voice_terminal_id: str + active_surgery_id: str | None = None + + class SurgeryClientErrorDetail(BaseModel): """与 `HTTPException(detail={...})` 对应;最终 JSON 为 `{"detail": {...}}`。""" diff --git a/app/services/surgery_pipeline.py b/app/services/surgery_pipeline.py index d299827..f243b9e 100644 --- a/app/services/surgery_pipeline.py +++ b/app/services/surgery_pipeline.py @@ -77,13 +77,16 @@ class SurgeryPipeline: f"开录未能确认:{exc}", ) from exc - async def stop_recording(self, surgery_id: str) -> None: - """停止该手术关联的摄像头录制。仅在确认已全部停录时返回。""" + async def stop_recording(self, surgery_id: str) -> str | None: + """停止该手术关联的摄像头录制。仅在确认已全部停录时返回。返回绑定的语音终端 ID(若有)。""" try: - await self._sessions.stop_surgery(surgery_id, require_active=True) + return await self._sessions.stop_surgery(surgery_id, require_active=True) except SurgeryPipelineError: raise + 
def set_voice_terminal_id(self, surgery_id: str, terminal_id: str | None) -> None: + self._sessions.set_voice_terminal_id(surgery_id, terminal_id) + async def get_consumption_details_for_client( self, surgery_id: str, diff --git a/app/services/synthetic_rtsp.py b/app/services/synthetic_rtsp.py index c34476a..c84bbbe 100644 --- a/app/services/synthetic_rtsp.py +++ b/app/services/synthetic_rtsp.py @@ -6,7 +6,6 @@ process is gone — reconnect or re-orchestrate for another playthrough. from __future__ import annotations -import json import os import shutil import socket @@ -222,25 +221,3 @@ class SyntheticRtspManager: self._active = run return run, url_map - - -def write_rtsp_url_json_file( - path: Path, - url_map: dict[str, str], - *, - replace_host: str, -) -> None: - """Write JSON map; replace 127.0.0.1 in values with `replace_host` (e.g. host.docker.internal).""" - if replace_host in ("", "127.0.0.1"): - out = url_map - else: - out = { - k: v.replace("127.0.0.1", replace_host, 1) - for k, v in url_map.items() - } - path.parent.mkdir(parents=True, exist_ok=True) - text = json.dumps(out, ensure_ascii=False, indent=2, sort_keys=True) + "\n" - temp = path.with_name(path.name + ".tmp") - temp.write_text(text, encoding="utf-8") - temp.replace(path) - logger.info("Wrote RTSP map to {}", path) diff --git a/app/services/video/backend_resolver.py b/app/services/video/backend_resolver.py index c796904..a35e055 100644 --- a/app/services/video/backend_resolver.py +++ b/app/services/video/backend_resolver.py @@ -54,7 +54,7 @@ class BackendResolver: return VideoBackendKind.RTSP def rtsp_url_for_camera(self, camera_id: str) -> str: - # Re-read on each use so VIDEO_RTSP_URLS_JSON_FILE can be hot-updated (e.g. dev orchestrator). + # Re-read on each use so OR_SITE_CONFIG_JSON_FILE can be hot-updated (e.g. dev orchestrator). 
m = self._s.video_rtsp_url_map() if camera_id in m: return m[camera_id] @@ -67,8 +67,8 @@ class BackendResolver: f"video_rtsp_url_template missing placeholder: {exc}" ) from exc raise ValueError( - f"No RTSP URL for camera_id={camera_id!r}: set VIDEO_RTSP_URLS_JSON_FILE, " - f"VIDEO_RTSP_URLS_JSON, or VIDEO_RTSP_URL_TEMPLATE" + f"No RTSP URL for camera_id={camera_id!r}: set OR_SITE_CONFIG_JSON_FILE " + f"(video_rtsp_urls) or VIDEO_RTSP_URL_TEMPLATE" ) def rtsp_url_after_hikvision_login(self, camera_id: str) -> str: diff --git a/app/services/video/session_manager.py b/app/services/video/session_manager.py index 8ebf7a4..ed5a0ae 100644 --- a/app/services/video/session_manager.py +++ b/app/services/video/session_manager.py @@ -223,7 +223,17 @@ class CameraSessionManager: await self.stop_surgery(surgery_id, require_active=True) raise - async def stop_surgery(self, surgery_id: str, *, require_active: bool = True) -> None: + def set_voice_terminal_id(self, surgery_id: str, terminal_id: str | None) -> None: + """开录成功后写入,供停录时向对应桌面终端推送 end。""" + run = self._registry.get_running(surgery_id) + if run is None: + return + tid = (terminal_id or "").strip() + run.state.voice_terminal_id = tid or None + + async def stop_surgery( + self, surgery_id: str, *, require_active: bool = True + ) -> str | None: run = await self._registry.unregister(surgery_id) if run is None: if require_active: @@ -231,8 +241,9 @@ class CameraSessionManager: "RECORDING_NOT_STOPPED", "停录未能完成:当前没有该手术的活跃录制会话。", ) - return + return None + voice_tid = run.state.voice_terminal_id run.stop_event.set() results = await asyncio.gather(*run.tasks, return_exceptions=True) for res in results: @@ -255,6 +266,7 @@ class CameraSessionManager: append_consumption_log_summary(surgery_id, totals) print_consumption_summary_markdown(totals) await self._archive.persist_or_archive(surgery_id, details) + return voice_tid # ------------------------------------------------------------------ # PendingConfirmationStore 协议委托 diff --git 
a/app/services/video/session_registry.py b/app/services/video/session_registry.py index 1e0784a..7c3d1e2 100644 --- a/app/services/video/session_registry.py +++ b/app/services/video/session_registry.py @@ -85,6 +85,8 @@ class SurgerySessionState: last_voice_error: str | None = None #: ``start_surgery`` 创建会话时的 ``time.time()``,用于日志中「相对开录的流逝时间」。 surgery_started_wall: float | None = None + #: 术间绑定配置解析出的语音桌面终端 ID;停录时用于推送 end。 + voice_terminal_id: str | None = None @dataclass diff --git a/app/services/voice_terminal_binding.py b/app/services/voice_terminal_binding.py new file mode 100644 index 0000000..2099566 --- /dev/null +++ b/app/services/voice_terminal_binding.py @@ -0,0 +1,84 @@ +"""术间配置:camera_ids 集合与语音桌面终端 ID 绑定。""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import Any + +from loguru import logger + + +@dataclass(frozen=True) +class OrRoomBinding: + or_room_id: str + camera_ids: frozenset[str] + voice_terminal_id: str + + +@dataclass +class VoiceTerminalBindingIndex: + """由 ``or_site_config.voice_or_room_bindings`` 数组构建。""" + + rooms: tuple[OrRoomBinding, ...] 
+ + def resolve_terminal(self, camera_ids: list[str]) -> str | None: + """精确匹配 camera 集合;否则开录路数为术间子集时匹配最小超集术间。""" + key = frozenset(str(x).strip() for x in camera_ids if str(x).strip()) + if not key: + return None + for r in self.rooms: + if r.camera_ids == key: + return r.voice_terminal_id + candidates = [r for r in self.rooms if key <= r.camera_ids] + if not candidates: + return None + if len(candidates) == 1: + return candidates[0].voice_terminal_id + candidates.sort(key=lambda r: (len(r.camera_ids), r.or_room_id, r.voice_terminal_id)) + return candidates[0].voice_terminal_id + + @staticmethod + def from_binding_list(data: list[Any]) -> VoiceTerminalBindingIndex | None: + rows: list[OrRoomBinding] = [] + seen_terminals: set[str] = set() + seen_camera_sets: set[frozenset[str]] = set() + for i, item in enumerate(data): + if not isinstance(item, dict): + logger.warning("voice_or_room_bindings[{}] must be an object", i) + return None + rid = str(item.get("or_room_id") or "").strip() + tid = str(item.get("voice_terminal_id") or "").strip() + cams = item.get("camera_ids") + if not rid or not tid or not isinstance(cams, list): + logger.warning( + "voice_or_room_bindings[{}] missing or invalid fields", i + ) + return None + cam_set = frozenset(str(x).strip() for x in cams if str(x).strip()) + if not cam_set: + logger.warning( + "voice_or_room_bindings[{}] camera_ids must be non-empty", i + ) + return None + if tid in seen_terminals: + logger.warning( + "voice_or_room_bindings: duplicate voice_terminal_id {!r}", + tid, + ) + return None + if cam_set in seen_camera_sets: + logger.warning( + "voice_or_room_bindings: duplicate camera_ids set for room {!r}", + rid, + ) + return None + seen_terminals.add(tid) + seen_camera_sets.add(cam_set) + rows.append( + OrRoomBinding( + or_room_id=rid, + camera_ids=cam_set, + voice_terminal_id=tid, + ) + ) + return VoiceTerminalBindingIndex(rooms=tuple(rows)) diff --git a/app/services/voice_terminal_hub.py 
b/app/services/voice_terminal_hub.py new file mode 100644 index 0000000..38c6118 --- /dev/null +++ b/app/services/voice_terminal_hub.py @@ -0,0 +1,152 @@ +"""语音桌面终端:assignment 状态、WebSocket 推送与 HTTP 轮询兜底。""" + +from __future__ import annotations + +import json +from asyncio import Lock +from collections import defaultdict +from collections.abc import Callable + +from fastapi import WebSocket +from loguru import logger +from starlette.websockets import WebSocketDisconnect + +from app.config import Settings +from app.services.voice_terminal_binding import VoiceTerminalBindingIndex + + +async def assign_voice_terminal_after_recording_started( + hub: VoiceTerminalHub, + *, + surgery_id: str, + camera_ids: list[str], + set_voice_terminal_id: Callable[[str, str | None], None], +) -> None: + """开录成功后:按站点绑定解析终端、写入会话、并 WebSocket 推送 start(与 HTTP 开录一致)。""" + voice_tid = hub.resolve_terminal(list(camera_ids)) + if voice_tid: + set_voice_terminal_id(surgery_id, voice_tid) + await hub.notify_start(voice_tid, surgery_id) + elif hub.bindings is not None: + logger.warning( + "voice or room bindings have no camera set matching start " + "surgery_id={} camera_ids={}", + surgery_id, + camera_ids, + ) + + +class VoiceTerminalHub: + """进程内终端连接与当前手术分配(多 worker 需另行同步)。""" + + def __init__(self, settings: Settings) -> None: + cfg = settings.load_or_site_config() + self._bindings = cfg.voice_bindings if cfg else None + self._assignments: dict[str, str] = {} + self._lock = Lock() + self._connections: dict[str, set[WebSocket]] = defaultdict(set) + + @property + def bindings(self) -> VoiceTerminalBindingIndex | None: + return self._bindings + + def resolve_terminal(self, camera_ids: list[str]) -> str | None: + if self._bindings is None: + return None + return self._bindings.resolve_terminal(camera_ids) + + def get_assignment(self, terminal_id: str) -> str | None: + return self._assignments.get(terminal_id.strip()) + + async def notify_start(self, terminal_id: str, surgery_id: str) -> None: + 
tid = terminal_id.strip() + if not tid: + return + payload = { + "type": "voice_assignment", + "action": "start", + "surgery_id": surgery_id, + } + async with self._lock: + self._assignments[tid] = surgery_id + await self._broadcast(tid, payload) + logger.info( + "Voice terminal {} assigned surgery {} (start push)", + tid, + surgery_id, + ) + + async def notify_end(self, terminal_id: str | None, surgery_id: str) -> None: + if not terminal_id: + return + tid = terminal_id.strip() + if not tid: + return + payload = { + "type": "voice_assignment", + "action": "end", + "surgery_id": surgery_id, + } + async with self._lock: + if self._assignments.get(tid) == surgery_id: + del self._assignments[tid] + await self._broadcast(tid, payload) + logger.info( + "Voice terminal {} released surgery {} (end push)", + tid, + surgery_id, + ) + + async def handle_websocket(self, websocket: WebSocket, terminal_id: str) -> None: + tid = terminal_id.strip() + if not tid: + await websocket.close(code=4400) + return + await websocket.accept() + async with self._lock: + self._connections[tid].add(websocket) + try: + # 连接后立即推送当前 assignment,避免错过 start + sid = self._assignments.get(tid) + if sid: + await websocket.send_text( + json.dumps( + { + "type": "voice_assignment", + "action": "start", + "surgery_id": sid, + }, + ensure_ascii=False, + ) + ) + # 不能用 receive_text():桌面端 websocket-client 会发 ping/二进制控制帧, + # ASGI 可能呈现为无 "text" 的 websocket.receive,receive_text 会 KeyError 并掐断连接。 + while True: + message = await websocket.receive() + if message["type"] == "websocket.disconnect": + break + except WebSocketDisconnect: + pass + finally: + async with self._lock: + conns = self._connections.get(tid) + if conns: + conns.discard(websocket) + if not conns: + del self._connections[tid] + + async def _broadcast(self, terminal_id: str, payload: dict) -> None: + text = json.dumps(payload, ensure_ascii=False) + async with self._lock: + targets = list(self._connections.get(terminal_id, ())) + dead: 
list[WebSocket] = [] + for ws in targets: + try: + await ws.send_text(text) + except Exception as exc: + logger.debug("voice terminal ws send failed: {}", exc) + dead.append(ws) + if dead: + async with self._lock: + for ws in dead: + self._connections[terminal_id].discard(ws) diff --git a/docker-compose.prod.yml b/docker-compose.prod.yml index fa9b821..41f652a 100644 --- a/docker-compose.prod.yml +++ b/docker-compose.prod.yml @@ -44,8 +44,7 @@ services: # Video backends (RTSP / optional Hikvision SDK) — see docs/video-backends.md VIDEO_DEFAULT_BACKEND: ${VIDEO_DEFAULT_BACKEND:-rtsp} VIDEO_RTSP_URL_TEMPLATE: ${VIDEO_RTSP_URL_TEMPLATE:-} - VIDEO_RTSP_URLS_JSON_FILE: ${VIDEO_RTSP_URLS_JSON_FILE:-} - VIDEO_RTSP_URLS_JSON: ${VIDEO_RTSP_URLS_JSON:-} + OR_SITE_CONFIG_JSON_FILE: ${OR_SITE_CONFIG_JSON_FILE:-} VIDEO_CAMERA_BACKEND_OVERRIDES_JSON: ${VIDEO_CAMERA_BACKEND_OVERRIDES_JSON:-} HIKVISION_SDK_ENABLED: ${HIKVISION_SDK_ENABLED:-false} HIKVISION_LIB_DIR: ${HIKVISION_LIB_DIR:-/opt/hikvision/lib} diff --git a/docs/staging-regression-checklist.md b/docs/staging-regression-checklist.md index 6598949..c812672 100644 --- a/docs/staging-regression-checklist.md +++ b/docs/staging-regression-checklist.md @@ -7,7 +7,7 @@ ## 环境 - `GET /health` 返回 `200`,`database: connected` -- 环境变量:`VIDEO_RTSP_URLS_JSON` 或 `VIDEO_RTSP_URLS_JSON_FILE` 与客户端 `camera_ids` 一致 +- 环境变量:`OR_SITE_CONFIG_JSON_FILE` 中 `video_rtsp_urls` 与客户端 `camera_ids` 一致;`voice_or_room_bindings` 与现场术间一致 - `MINIO_*`、`BAIDU_APP_ID` / `BAIDU_API_KEY` / `BAIDU_SECRET_KEY` 已配置(语音确认链路) - 模型权重在镜像/挂载中可读(默认路径见 `app/baked/algorithm.py` 与仓库 `app/resources/*.pt`) diff --git a/docs/video-backends.md b/docs/video-backends.md index 397e0a9..deaae02 100644 --- a/docs/video-backends.md +++ b/docs/video-backends.md @@ -8,10 +8,9 @@ ## RTSP 模式(默认) -1. 
配置 `**camera_id` → RTSP URL** 映射,任选其一或组合使用: - `**VIDEO_RTSP_URLS_JSON_FILE`**:指向 UTF-8 JSON 文件(对象键为与请求一致的 `camera_id`)。仓库示例:`[app/resources/camera_rtsp_urls.sample.json](../app/resources/camera_rtsp_urls.sample.json)`(示例 ID:`or-cam-01`、`or-cam-02`)。 - `**VIDEO_RTSP_URLS_JSON**`:内联 JSON 字符串;与文件合并时**覆盖同键**。 - `**VIDEO_RTSP_URL_TEMPLATE`**:单模板,可用 `{camera_id}`。 +1. 配置 **`camera_id` → RTSP URL** 映射: + - `**OR_SITE_CONFIG_JSON_FILE**`(推荐):UTF-8 JSON 文件,**仅支持**站点对象:`{"video_rtsp_urls":{...},"voice_or_room_bindings":[...]}`。根级只允许这两个键;`voice_or_room_bindings` 可为 `[]`。见 `[app/resources/or_site_config.sample.json](../app/resources/or_site_config.sample.json)`。服务每次解析映射时会重新读文件,便于联调覆盖 `video_rtsp_urls`。 + - `**VIDEO_RTSP_URL_TEMPLATE**`(可选):单模板字符串,可用 `{camera_id}`;在 `video_rtsp_urls` 未给出某路时使用。 2. 调用 `POST /client/surgeries/start` 时,`camera_ids` 必须能在上述配置中解析出 RTSP 地址。 3. **开录确认**:每路摄像头在超时内成功打开并读到**首帧**后,才认为该路已开录。 @@ -26,7 +25,7 @@ SDK **不作为构建期依赖**:将厂商提供的 Linux x86_64 动态库挂 行为概要: 1. 进程内对 `NET_DVR_Init` 使用引用计数;每路使用 SDK 的工作线程在登录后 `NET_DVR_Logout`,线程结束时配对 `NET_DVR_Cleanup`。 -2. 若 `HIKVISION_SDK_FALLBACK_TO_RTSP=true`(默认),在**无法加载动态库**、**登录失败**或**未配置凭据**时,自动回退到 `VIDEO_RTSP_`* 映射拉流。 +2. 
若 `HIKVISION_SDK_FALLBACK_TO_RTSP=true`(默认),在**无法加载动态库**、**登录失败**或**未配置凭据**时,自动回退到 **OR 站点配置**中的 `video_rtsp_urls` 或 `VIDEO_RTSP_URL_TEMPLATE` 拉流。 **注意**:`NET_DVR_Login_V30` 的设备信息结构体在不同 SDK 版本上可能存在差异;若登录异常,请优先使用 RTSP 回退或按厂商文档校对 ctypes 绑定。 diff --git a/docs/客户端手术通信接口说明.md b/docs/客户端手术通信接口说明.md index dfd8028..44239d7 100644 --- a/docs/客户端手术通信接口说明.md +++ b/docs/客户端手术通信接口说明.md @@ -41,8 +41,18 @@ RTSP 地址、账号、口令等由客户端对接工程师提供给服务端运 | 4 | `GET` | `/client/surgeries/{surgery_id}/result` | 查询手术结果 | | 5 | `GET` | `/client/surgeries/{surgery_id}/pending-confirmation` | 拉取待确认耗材 | | 6 | `POST` | `/client/surgeries/{surgery_id}/pending-confirmation/{confirmation_id}/resolve` | 提交医生答复 | +| 7 | `GET` | `/client/voice-terminals/{terminal_id}/assignment` | 可选:查询当前指派(调试或简易集成;**官方桌面客户端仅用 WebSocket**) | +| 8 | `WS` | `/client/voice-terminals/ws?terminal_id=...` | 语音桌面终端长连接,接收开录/停录指派(**推荐**) | | | | | | +**术间与语音终端绑定(服务端配置)** + +- **唯一配置源**:环境变量 **`OR_SITE_CONFIG_JSON_FILE`** 指向手术室 **站点 JSON**(UTF-8),须同时包含 `video_rtsp_urls` 与 `voice_or_room_bindings`(见仓库 `app/resources/or_site_config.sample.json`)。`voice_or_room_bindings` 为数组,每项含 `or_room_id`、`camera_ids`、`voice_terminal_id`;`camera_ids` 在数组内须唯一,`voice_terminal_id` 全局唯一。 +- **`POST /client/surgeries/start`** 在 **HTTP 200 且开录已成功** 后:用请求体中的 `camera_ids` 在 `voice_or_room_bindings` 中解析终端(**精确匹配**术间 camera 集合,或 **开录路集为某术间 camera 集合的子集** 时匹配该术间);命中则向对应 `voice_terminal_id` 推送 **`"action":"start"`**(并更新 assignment);未配置站点文件、或数组为空、或未命中则仅打日志,不影响 200。 +- **`POST /client/surgeries/end`** 在停录 **HTTP 200** 后:向该手术会话记录的终端推送 **`"action":"end"`**(并清除 assignment)。 +- 推送 JSON 形如:`{"type":"voice_assignment","action":"start"|"end","surgery_id":"123456"}`。 +- **多 worker**:当前实现为进程内内存;多 Uvicorn worker 时需 sticky session 或 Redis 等另行同步。 + ## 4. 
流程 ### 4.1 时序图 @@ -421,4 +431,27 @@ flowchart LR curl -sS -X POST \ "http://<主机>:38080/client/surgeries/123456/pending-confirmation//resolve" \ -F "audio=@/path/to/voice.wav;type=audio/wav" -``` \ No newline at end of file +``` + +### 5.7 语音终端 assignment(HTTP,可选) + +**路径** `GET /client/voice-terminals/{terminal_id}/assignment` + +仓库内 **手术室耗材语音确认桌面客户端** 仅通过 **§5.8 WebSocket** 接收指派,**不调用**本接口。此处供运维脚本、未实现 WS 的第三方临时拉取 `active_surgery_id`。 + +**响应 200** + +| **字段** | **类型** | **说明** | | -------- | -------- | -------- | | `voice_terminal_id` | `string` | 与路径一致 | | `active_surgery_id` | `string \| null` | 当前指派手术 6 位号;无指派时为 `null` | + +### 5.8 语音终端 WebSocket + +**路径** `GET ws://<主机>:<端口>/client/voice-terminals/ws?terminal_id=<终端ID>`(HTTPS 部署时使用 `wss://`) + +**说明** + +- 连接成功后,若服务端已有该终端的 assignment,会立即收到一条 **`"action":"start"`** 的 JSON(与下文推送格式一致)。 +- 术中由服务端在 **`start` / `end` 成功后** 向已连接终端推送 JSON:`{"type":"voice_assignment","action":"start"|"end","surgery_id":"123456"}`。 +- 客户端可发送任意文本作心跳;服务端当前仅依赖 WebSocket 协议级 ping(由网关或客户端库实现)。 \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index e8cfa8b..85d5419 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,15 +51,19 @@ dev = [ ] voice-client = [ "httpx>=0.28.0", + "loguru>=0.7.3", "numpy>=2.0.0", "PySide6>=6.8.0", "sounddevice>=0.5.0", + "websocket-client>=1.8.0", ] voice-client-build = [ "httpx>=0.28.0", + "loguru>=0.7.3", "numpy>=2.0.0", "PySide6>=6.8.0", "sounddevice>=0.5.0", + "websocket-client>=1.8.0", "pyinstaller>=6.0.0", ] diff --git a/scripts/demo_client/README.md b/scripts/demo_client/README.md index 1926230..4210978 100644 --- a/scripts/demo_client/README.md +++ b/scripts/demo_client/README.md @@ -33,7 +33,7 @@ python3 scripts/demo_client/fake_rtsp_from_file.py --port 18554 \ --stream 'or-cam-02|./b.mp4|demo2' ``` -`--stream` 格式为 `CAMERA_ID|文件路径|RTSP_PATH`(竖线分隔,整条加引号),生成的 `VIDEO_RTSP_URLS_JSON` 会同时包含 `or-cam-01` 与 `or-cam-02`。 +`--stream` 格式为 `CAMERA_ID|文件路径|RTSP_PATH`(竖线分隔,整条加引号),脚本会在 stderr 
打印含 `video_rtsp_urls` 与 `voice_or_room_bindings: []` 的 **站点 JSON 片段**,可合并进 `OR_SITE_CONFIG_JSON_FILE`。 在**另一终端**启动监控服务前 `source` 或手动 `export` 上述变量,使 `POST /client/surgeries/start` 里使用的 `camera_ids`(如 `or-cam-01,or-cam-02`)能解析到对应 URL。Demo 页里「将 camera_id 填到开始手术」可一键同步两路 id。 @@ -46,7 +46,7 @@ python3 scripts/demo_client/fake_rtsp_from_file.py --port 18554 \ - 给该服务容器加 `--add-host=host.docker.internal:host-gateway`(Docker 20.10+),或 - 直接把 URL 写成宿主在 **docker0/桥接网** 上可达的局域网 IP(如 `192.168.x.x`),保证从容器内 `curl`/`ffprobe` 能通 -`docker-compose` 里可将 `VIDEO_RTSP_URLS_JSON` 写进 `environment:` 或 env 文件;**不要**在仅容器可解析的配置里写 `127.0.0.1` 去指宿主机上的 RTSP(`127.0.0.1` 在容器内是容器自己)。 +生产/容器环境请使用 **`OR_SITE_CONFIG_JSON_FILE`** 指向完整站点 JSON(含 `video_rtsp_urls` 与 `voice_or_room_bindings`)。**不要**在仅容器可解析的配置里写 `127.0.0.1` 去指宿主机上的 RTSP(`127.0.0.1` 在容器内是容器自己)。 若监控与假 RTSP **都在宿主机同一系统**里直接跑(非容器),则用 `rtsp://127.0.0.1:...` 即可;否则应使用上面「容器连宿主」的写法。 @@ -58,7 +58,7 @@ ffmpeg -re -i recording.mp4 -c:v libx264 -pix_fmt yuv420p -f rtsp -rtsp_transpor (仍须先自行启动 MediaMTX 或等价 RTSP 服务端;上例为**播完即止**,若要循环请加 `-stream_loop -1`。) -Demo 页面「调试:两路视频」中可用 **选择视频** / **拖放** 为路1/路2 指定文件,并配合下面 **一键开录** 上传,无需在页面里手抄 `python3` / `export` 命令。若必须完全手跑 `fake_rtsp_from_file.py`,请在上文命令示例与 `export VIDEO_RTSP_URLS_JSON=...` 方式自行在终端完成。 +Demo 页面「调试:两路视频」中可用 **选择视频** / **拖放** 为路1/路2 指定文件,并配合下面 **一键开录** 上传。若必须完全手跑 `fake_rtsp_from_file.py`,请将其打印的站点 JSON 合并进 `OR_SITE_CONFIG_JSON_FILE`。 ## 一键开录(不再手抄命令) @@ -66,13 +66,13 @@ Demo 页面「调试:两路视频」中可用 **选择视频** / **拖放** 1. 落盘两路视频到临时目录 2. 用 Docker 起 MediaMTX、两路 ffmpeg 推 RTSP(与 `fake_rtsp_from_file.py` 等效) -3. 把 `{"or-cam-01":"rtsp://127.0.0.1:…","or-cam-02":"rtsp://127.0.0.1:…"}` 写入 `VIDEO_RTSP_URLS_JSON_FILE`(与开录/拉流同进程,固定本机回环;`DEMO_ORCHESTRATOR_RTSP_JSON_HOST` 仅影响你**手配**假流、给另一进程读 JSON 的用法) +3. 把当前假流的 **video_rtsp_urls** 合并写入 `OR_SITE_CONFIG_JSON_FILE`(保留已有 `voice_or_room_bindings`;与开录/拉流同进程,固定本机回环) 4. 
调用与普通开录相同逻辑 **需同时满足**: - `.env` 中 `DEMO_ORCHESTRATOR_ENABLED=true`(并重启 API) -- 已设置 `VIDEO_RTSP_URLS_JSON_FILE` 指向**可写**的 JSON 文件;Docker 中请用 **bind-mount** 到容器内同一路径 +- 已设置 `OR_SITE_CONFIG_JSON_FILE` 指向**可写**的站点 JSON;Docker 中请用 **bind-mount** 到容器内同一路径 - **运行 `main.py` 的进程**能执行本机 `docker` 与 `ffmpeg`(与手动跑 `fake_rtsp_from_file` 相同)。**仅将 API 放 Docker、且不挂载** ` /var/run/docker.sock` 时,容器内往往无法为你在宿主机起 MediaMTX,此时请继续用手动假流方式。 由于每次解析都会重新读取 `video_rtsp_url_map()`,覆盖 JSON 后**无需重启**主服务即可被下一次开录用到。 diff --git a/scripts/demo_client/fake_rtsp_from_file.py b/scripts/demo_client/fake_rtsp_from_file.py index 8fe665e..a21a617 100644 --- a/scripts/demo_client/fake_rtsp_from_file.py +++ b/scripts/demo_client/fake_rtsp_from_file.py @@ -3,7 +3,7 @@ The Operation Room server only opens RTSP URLs (OpenCV); there is no video-upload API. This script does NOT change the application backend: it runs ffmpeg + a small -RTSP server (MediaMTX) so you can point VIDEO_RTSP_URLS_JSON to rtsp://.../yourpath. +RTSP server (MediaMTX); put the printed ``video_rtsp_urls`` into ``OR_SITE_CONFIG_JSON_FILE``. 
Requires: - ffmpeg in PATH @@ -210,14 +210,18 @@ def main() -> int: p = subprocess.Popen(publish_cmd) # noqa: S603 procs.append(p) - j_compact = json.dumps(url_map, ensure_ascii=False, separators=(",", ":")) + site_doc = {"video_rtsp_urls": url_map, "voice_or_room_bindings": []} print("---", file=sys.stderr) - print("RTSP mapping (set on monitoring server):", file=sys.stderr) + print("RTSP mapping (per camera):", file=sys.stderr) for k, u in url_map.items(): print(f" {k}: {u}", file=sys.stderr) print("", file=sys.stderr) - print("export (same machine as monitoring server, env snippet):", file=sys.stderr) - print(f" export VIDEO_RTSP_URLS_JSON='{j_compact}'", file=sys.stderr) + print( + "OR site config (merge video_rtsp_urls into OR_SITE_CONFIG_JSON_FILE; " + "add voice_or_room_bindings as needed):", + file=sys.stderr, + ) + print(json.dumps(site_doc, ensure_ascii=False, indent=2), file=sys.stderr) print("", file=sys.stderr) print("If the server runs in Docker on Mac/Win, use host.docker.internal, e.g.:", file=sys.stderr) for cam, u in url_map.items(): diff --git a/scripts/demo_client/index.html b/scripts/demo_client/index.html index 40869f8..3b5b677 100644 --- a/scripts/demo_client/index.html +++ b/scripts/demo_client/index.html @@ -231,6 +231,12 @@ +

+ +

@@ -241,7 +247,7 @@

调试:多路视频 1–4 路(与一键联调 / 无真摄像头)

- 在下方选好各路视频、第 4.1 节勾选「一键联调」后点「开始手术」即可;服务端会起假 RTSP 并写 VIDEO_RTSP_URLS_JSON_FILE。无法使用一键时,请按 scripts/demo_client/README.md 在宿主机手跑 + 在下方选好各路视频、第 4.1 节勾选「一键联调」后点「开始手术」即可;服务端会起假 RTSP 并合并写入 OR_SITE_CONFIG_JSON_FILEvideo_rtsp_urls。无法使用一键时,请按 scripts/demo_client/README.md 在宿主机手跑 fake_rtsp_from_file.py 并配置环境变量。

@@ -363,7 +369,7 @@

@@ -388,6 +394,7 @@
+