feat: 配置写死与 baked 模块,Alembic 建表,百度仅 BAIDU_*

- 新增 app/baked/algorithm|pipeline,非部署参数不再走 env;Settings 保留 DB/HTTP/RTSP/海康/百度/MinIO/Demo
- 移除 init_db_schema 与 reload 配置;main 仅 check_database;start*.sh 在 uvicorn 前执行 alembic upgrade head
- 依赖 psycopg[binary] 供 Alembic 同步 URL;alembic/env 注释与预发清单更新
- 撕段门控消耗管线、各视频/语音/归档调用改为 baked
- 百度环境变量仅 BAIDU_APP_ID、BAIDU_API_KEY、BAIDU_SECRET_KEY 与 BAIDU_* 超时/ASR;人脸脚本与 baidu_speech 文案同步
- 全量单测与 .env.example 更新;.gitignore 忽略 refs/(本地权重/视频不入库)

Made-with: Cursor
This commit is contained in:
Kevin
2026-04-24 15:33:22 +08:00
parent b651364877
commit 8a4bad99d3
47 changed files with 1333 additions and 648 deletions

View File

@@ -22,7 +22,7 @@ from typing import TYPE_CHECKING
from loguru import logger
from sqlalchemy.ext.asyncio import async_sessionmaker
from app.config import Settings
from app.baked import pipeline as bp
from app.domain.consumption import SurgeryConsumptionStored
if TYPE_CHECKING:
@@ -86,11 +86,9 @@ class ArchivePersister:
def __init__(
self,
*,
settings: Settings,
repository: "SurgeryResultRepository | None",
session_factory: async_sessionmaker,
) -> None:
self._s = settings
self._repo = repository
self._session_factory = session_factory
self._archive: dict[str, _ArchiveEntry] = {}
@@ -139,7 +137,7 @@ class ArchivePersister:
if await self._write_to_db(surgery_id, details):
return True
entry = _ArchiveEntry(details=list(details))
if self._s.archive_persist_durable_fallback_enabled:
if bp.ARCHIVE_PERSIST_DURABLE_FALLBACK_ENABLED:
entry.durable_path = self._write_durable(surgery_id, details)
async with self._lock:
self._archive[surgery_id] = entry
@@ -193,9 +191,9 @@ class ArchivePersister:
async def recover_from_durable_fallback(self) -> int:
"""进程启动时调用:从 durable 目录把未写库的归档读回内存。"""
if not self._s.archive_persist_durable_fallback_enabled:
if not bp.ARCHIVE_PERSIST_DURABLE_FALLBACK_ENABLED:
return 0
directory = Path(self._s.archive_persist_durable_fallback_dir)
directory = Path(bp.ARCHIVE_PERSIST_DURABLE_FALLBACK_DIR)
if not directory.exists():
return 0
loaded = 0
@@ -250,7 +248,7 @@ class ArchivePersister:
surgery_id: str,
details: list[SurgeryConsumptionStored],
) -> Path | None:
directory = Path(self._s.archive_persist_durable_fallback_dir)
directory = Path(bp.ARCHIVE_PERSIST_DURABLE_FALLBACK_DIR)
try:
directory.mkdir(parents=True, exist_ok=True)
except Exception as exc:
@@ -281,15 +279,15 @@ class ArchivePersister:
logger.debug("remove durable archive {} failed: {}", path, exc)
def _next_backoff_seconds(self, attempts: int) -> float:
base = float(self._s.archive_persist_retry_interval_seconds)
cap = float(self._s.archive_persist_backoff_cap_seconds)
base = float(bp.ARCHIVE_PERSIST_RETRY_INTERVAL_SECONDS)
cap = float(bp.ARCHIVE_PERSIST_BACKOFF_CAP_SECONDS)
# 指数退避:base * 2^(attempts-1),首个间隔即 base。
exp = max(0, attempts - 1)
return min(cap, base * (2**exp))
async def _retry_loop(self) -> None:
base = float(self._s.archive_persist_retry_interval_seconds)
max_attempts = int(self._s.archive_persist_max_retries)
base = float(bp.ARCHIVE_PERSIST_RETRY_INTERVAL_SECONDS)
max_attempts = int(bp.ARCHIVE_PERSIST_MAX_RETRIES)
while not self._retry_stop.is_set():
try:
await asyncio.wait_for(self._retry_stop.wait(), timeout=base)

View File

@@ -14,7 +14,7 @@ from __future__ import annotations
from loguru import logger
from app.config import Settings
from app.baked import pipeline as bp
from app.services.consumable_vision_algorithm import (
PredictionCandidate,
PredictionResult,
@@ -54,10 +54,8 @@ class VisionClassificationHandler:
def __init__(
self,
*,
settings: Settings,
registry: SurgerySessionRegistry,
) -> None:
self._s = settings
self._registry = registry
def _append_vision_consumption_window_if_ready(
@@ -72,8 +70,8 @@ class VisionClassificationHandler:
or not surgery_id
or not camera_id
or (
not self._s.consumption_tsv_log_enabled
and not self._s.consumption_log_markdown_terminal
not bp.CONSUMPTION_TSV_LOG_ENABLED
and not bp.CONSUMPTION_LOG_MARKDOWN_TERMINAL
)
):
return
@@ -81,7 +79,7 @@ class VisionClassificationHandler:
surgery_id=surgery_id,
name_to_code=state.name_to_code,
best=ready.best,
doctor_id=self._s.video_result_doctor_id,
doctor_id=bp.VIDEO_RESULT_DOCTOR_ID,
camera_id=camera_id,
wall_start_epoch=ready.wall_lo,
wall_end_epoch=ready.wall_hi,
@@ -100,7 +98,7 @@ class VisionClassificationHandler:
label = (cls_res.label or "").strip()
t1_pid = (ready.best.t1_pid if ready is not None else "")
item_id = resolve_consumption_item_id(label, t1_pid, state.name_to_code)
voice_floor = self._s.video_voice_confirm_min_confidence
voice_floor = bp.VIDEO_VOICE_CONFIRM_MIN_CONFIDENCE
if conf < voice_floor:
return
@@ -110,7 +108,7 @@ class VisionClassificationHandler:
cand_set = set(cand_order)
ranked = rank_topk_for_candidates(cls_res.topk, cand_order)
auto_th = self._s.video_auto_confirm_confidence
auto_th = bp.VIDEO_AUTO_CONFIRM_CONFIDENCE
def in_allowed(name: str) -> bool:
return name in cand_set
@@ -123,13 +121,13 @@ class VisionClassificationHandler:
state=state,
item_id=item_id or "unknown",
item_name=label or "unknown",
doctor_id=self._s.video_result_doctor_id,
doctor_id=bp.VIDEO_RESULT_DOCTOR_ID,
source="vision",
)
return
if conf >= auto_th and not in_allowed(label):
if ranked and self._s.voice_confirmation_enabled:
if ranked and bp.VOICE_CONFIRMATION_ENABLED:
await self._enqueue(
state,
ranked,
@@ -141,7 +139,7 @@ class VisionClassificationHandler:
)
return
if not self._s.voice_confirmation_enabled:
if not bp.VOICE_CONFIRMATION_ENABLED:
return
if ranked:
@@ -190,22 +188,22 @@ class VisionClassificationHandler:
top_key,
)
if ready is not None and surgery_id and camera_id and (
self._s.consumption_tsv_log_enabled
or self._s.consumption_log_markdown_terminal
bp.CONSUMPTION_TSV_LOG_ENABLED
or bp.CONSUMPTION_LOG_MARKDOWN_TERMINAL
):
append_consumption_pending_window(
surgery_id=surgery_id,
confirmation_id=cid,
model_snap=ready.best,
doctor_id=self._s.video_result_doctor_id,
doctor_id=bp.VIDEO_RESULT_DOCTOR_ID,
camera_id=camera_id,
wall_start_epoch=ready.wall_lo,
wall_end_epoch=ready.wall_hi,
tsv_enabled=self._s.consumption_tsv_log_enabled,
markdown_terminal=self._s.consumption_log_markdown_terminal,
tsv_enabled=bp.CONSUMPTION_TSV_LOG_ENABLED,
markdown_terminal=bp.CONSUMPTION_LOG_MARKDOWN_TERMINAL,
)
await self._registry.append_pending_consumption_detail(
state=state,
confirmation_id=cid,
doctor_id=self._s.video_result_doctor_id,
doctor_id=bp.VIDEO_RESULT_DOCTOR_ID,
)

View File

@@ -10,7 +10,7 @@ from __future__ import annotations
import time
from dataclasses import dataclass
from app.config import Settings
from app.baked import algorithm as ba
from app.services.consumable_vision_algorithm import (
ClsTop3,
PredictionResult,
@@ -40,8 +40,8 @@ class WindowInferenceAggregator:
便于与原逻辑保持一致;调用方在持有 ``state.lock`` 时调用下面的方法。
"""
def __init__(self, *, settings: Settings) -> None:
self._s = settings
def __init__(self) -> None:
pass
def ingest_snapshot_and_collect_ready(
self,
@@ -57,7 +57,7 @@ class WindowInferenceAggregator:
"""
_ = surgery_id
_ = camera_id
wsec = self._s.consumable_vision_window_sec
wsec = ba.CONSUMABLE_VISION_WINDOW_SEC
ready: list[WindowInferenceReady] = []
cis = state.camera_infer.setdefault(camera_id, CameraStreamInferState())
if cis.stream_t0 is None:

View File

@@ -2,9 +2,13 @@ from __future__ import annotations
import asyncio
import time
from datetime import datetime, timezone
from pathlib import Path
from loguru import logger
from app.baked import algorithm as ba
from app.baked import pipeline as bp
from app.config import Settings
from sqlalchemy.ext.asyncio import async_sessionmaker
@@ -27,6 +31,15 @@ from app.services.video.session_registry import (
SurgerySessionRegistry,
SurgerySessionState,
)
from app.services.tear_gated_segment_consumption.product_map import (
load_tear_segment_name_to_id,
resolve_tear_segment_labels_yaml_path,
)
from app.services.tear_gated_segment_consumption.report import write_tear_segment_txt
from app.services.tear_gated_segment_consumption.runner import (
TearGatedSegmentModelBundle,
TearGatedSegmentRunner,
)
from app.services.video.stream_worker import CameraStreamWorker, redact_rtsp_url
from app.services.video.types import VideoBackendKind
from app.schemas import SurgeryConsumptionDetail, build_consumption_summary
@@ -69,21 +82,21 @@ class CameraSessionManager:
session_factory: async_sessionmaker | None = None,
registry: SurgerySessionRegistry | None = None,
archive_persister: ArchivePersister | None = None,
tear_segment_models: TearGatedSegmentModelBundle | None = None,
) -> None:
self._s = settings
self._vision = vision_algorithm
self._hik = hikvision_runtime
self._tear_models = tear_segment_models
self._session_factory: async_sessionmaker = session_factory or AsyncSessionLocal
self._resolver = BackendResolver(settings, hikvision_runtime=hikvision_runtime)
self._registry = registry or SurgerySessionRegistry(settings=settings)
self._registry = registry or SurgerySessionRegistry()
self._archive = archive_persister or ArchivePersister(
settings=settings,
repository=result_repository,
session_factory=self._session_factory,
)
self._aggregator = WindowInferenceAggregator(settings=settings)
self._aggregator = WindowInferenceAggregator()
self._classifier_handler = VisionClassificationHandler(
settings=settings,
registry=self._registry,
)
@@ -157,7 +170,7 @@ class CameraSessionManager:
stop_event = asyncio.Event()
readies = [asyncio.Event() for _ in camera_ids]
tasks: list[asyncio.Task[None]] = []
open_timeout = self._s.video_open_timeout_sec + 5.0
open_timeout = bp.VIDEO_OPEN_TIMEOUT_SEC + 5.0
for cam_id, ready in zip(camera_ids, readies, strict=True):
tasks.append(
@@ -175,9 +188,18 @@ class CameraSessionManager:
run = RunningSurgery(stop_event=stop_event, state=state, tasks=tasks)
init_consumption_log_file(surgery_id)
init_voice_log_file(surgery_id, self._s)
init_voice_log_file(surgery_id)
await self._registry.register(surgery_id, run)
if ba.TEAR_SEGMENT_ENABLED:
primary = (ba.TEAR_SEGMENT_PRIMARY_CAMERA_ID or "").strip()
if primary and primary not in camera_ids:
logger.warning(
"撕段算法已开启但主摄 id={!r} 不在本台开录 camera_ids={} 中,该路不会跑撕段流水线",
primary,
camera_ids,
)
try:
await asyncio.wait_for(
asyncio.gather(*(r.wait() for r in readies)),
@@ -292,6 +314,45 @@ class CameraSessionManager:
# ------------------------------------------------------------------
# Camera worker拉流 + 推理节流 + 时间窗分桶 + 分类结果处理)
# ------------------------------------------------------------------
async def _finalize_tear_segment_runner(
self,
*,
surgery_id: str,
camera_id: str,
state: SurgerySessionState,
runner: TearGatedSegmentRunner,
) -> None:
recs = runner.finalize()
for rec in recs:
wall_ts = runner.wall_time_for_record(rec)
detail_ts = datetime.fromtimestamp(wall_ts, tz=timezone.utc)
await self._registry.append_confirmed_detail(
state=state,
item_id=rec.item_id,
item_name=rec.item_name,
doctor_id=bp.VIDEO_RESULT_DOCTOR_ID,
source="tear_segment",
cooldown_key=f"{surgery_id}:tear_seg:{rec.segment_index}",
detail_timestamp=detail_ts,
)
if ba.TEAR_SEGMENT_LOG_TXT and recs:
raw_tpl = (ba.TEAR_SEGMENT_LOG_TXT_PATH or "").strip()
if raw_tpl and "{surgery_id}" in raw_tpl:
p = Path(raw_tpl.format(surgery_id=surgery_id)).expanduser()
elif raw_tpl:
p = Path(raw_tpl).expanduser()
else:
p = Path(f"logs/tear_segment_{surgery_id}.txt")
labels_src = str(resolve_tear_segment_labels_yaml_path())
write_tear_segment_txt(
path=p,
surgery_id=surgery_id,
camera_id=camera_id,
labels_source=labels_src,
records=recs,
)
logger.info("撕段报告已写: {}", p)
async def _camera_worker(
self,
*,
@@ -311,74 +372,119 @@ class CameraSessionManager:
)
assert url is not None
last_infer = 0.0
async def _frame_handler(frame: object) -> None:
nonlocal last_infer
now = time.monotonic()
if now - last_infer < self._s.video_inference_interval_sec:
await asyncio.sleep(0.01)
return
last_infer = now
primary = (ba.TEAR_SEGMENT_PRIMARY_CAMERA_ID or "").strip()
use_tear_req = (
ba.TEAR_SEGMENT_ENABLED
and self._tear_models is not None
and primary
and camera_id == primary
)
runner: TearGatedSegmentRunner | None = None
if use_tear_req:
name_to_id = load_tear_segment_name_to_id()
try:
snap = await asyncio.to_thread(
self._vision.infer_frame_bgr,
frame,
state.name_to_code,
)
self._tear_models.ensure_loaded()
runner = self._tear_models.create_runner(name_to_id)
except Exception as exc:
logger.debug(
"Inference skip camera={} surgery={}: {}",
logger.exception(
"撕段模型未就绪,本路回退为原时间窗算法 camera={} surgery={}: {}",
camera_id,
surgery_id,
exc,
)
return
runner = None
if snap is None:
return
if runner is not None:
if self._s.video_log_inference_results:
logger.info(
"Vision result surgery={} camera={} top1={}({:.3f}) top2={}({:.3f}) top3={}({:.3f})",
surgery_id,
camera_id,
snap.t1_name,
snap.t1_conf,
snap.t2_name,
snap.t2_conf,
snap.t3_name,
snap.t3_conf,
async def _frame_handler_tear(frame: object) -> None:
await asyncio.to_thread(runner.process_frame_bgr, frame)
w_tear = CameraStreamWorker(
surgery_id=surgery_id,
camera_id=camera_id,
url=url,
)
try:
await w_tear.run(
stream_ready=stream_ready,
stop_event=stop_event,
frame_handler=_frame_handler_tear,
)
async with state.lock:
ready_windows = self._aggregator.ingest_snapshot_and_collect_ready(
finally:
await self._finalize_tear_segment_runner(
surgery_id=surgery_id,
camera_id=camera_id,
snap=snap,
state=state,
runner=runner,
)
else:
last_infer = 0.0
for win in ready_windows:
await self._classifier_handler.handle(
state=state,
cls_res=win.prediction,
ready=win,
surgery_id=surgery_id,
camera_id=camera_id,
)
async def _frame_handler(frame: object) -> None:
nonlocal last_infer
now = time.monotonic()
if now - last_infer < bp.VIDEO_INFERENCE_INTERVAL_SEC:
await asyncio.sleep(0.01)
return
last_infer = now
try:
snap = await asyncio.to_thread(
self._vision.infer_frame_bgr,
frame,
state.name_to_code,
)
except Exception as exc:
logger.debug(
"Inference skip camera={} surgery={}: {}",
camera_id,
surgery_id,
exc,
)
return
worker = CameraStreamWorker(
settings=self._s,
surgery_id=surgery_id,
camera_id=camera_id,
url=url,
)
await worker.run(
stream_ready=stream_ready,
stop_event=stop_event,
frame_handler=_frame_handler,
)
if snap is None:
return
if bp.VIDEO_LOG_INFERENCE_RESULTS:
logger.info(
"Vision result surgery={} camera={} top1={}({:.3f}) top2={}({:.3f}) top3={}({:.3f})",
surgery_id,
camera_id,
snap.t1_name,
snap.t1_conf,
snap.t2_name,
snap.t2_conf,
snap.t3_name,
snap.t3_conf,
)
async with state.lock:
ready_windows = self._aggregator.ingest_snapshot_and_collect_ready(
surgery_id=surgery_id,
camera_id=camera_id,
snap=snap,
state=state,
)
for win in ready_windows:
await self._classifier_handler.handle(
state=state,
cls_res=win.prediction,
ready=win,
surgery_id=surgery_id,
camera_id=camera_id,
)
worker = CameraStreamWorker(
surgery_id=surgery_id,
camera_id=camera_id,
url=url,
)
await worker.run(
stream_ready=stream_ready,
stop_event=stop_event,
frame_handler=_frame_handler,
)
finally:
if hik_user_id is not None and self._hik is not None:
await asyncio.to_thread(self._hik.logout, hik_user_id)

View File

@@ -14,7 +14,7 @@ from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Literal
from app.config import Settings
from app.baked import pipeline as bp
from app.domain.consumption import SurgeryConsumptionStored
from app.services.consumable_vision_algorithm import (
ClsTop3,
@@ -89,8 +89,7 @@ class SurgerySessionRegistry:
生命周期归 ``CameraSessionManager`` 负责,新增/停止会话都走本类。
"""
def __init__(self, *, settings: Settings) -> None:
self._s = settings
def __init__(self) -> None:
self._active: dict[str, RunningSurgery] = {}
self._manager_lock = asyncio.Lock()
@@ -168,7 +167,7 @@ class SurgerySessionRegistry:
if run is None:
return 0, 0
st = run.state
max_r = int(self._s.voice_confirm_max_failed_parse_rounds)
max_r = int(bp.VOICE_CONFIRM_MAX_FAILED_PARSE_ROUNDS)
async with st.lock:
p = st.pending_by_id.get(confirmation_id)
if p is None or p.status != "pending":
@@ -250,7 +249,7 @@ class SurgerySessionRegistry:
item_id=item_id,
item_name=label,
qty=1,
doctor_id=self._s.video_voice_confirm_doctor_id,
doctor_id=bp.VIDEO_VOICE_CONFIRM_DOCTOR_ID,
timestamp=datetime.now(timezone.utc),
source="voice",
pending_confirmation_id=None,
@@ -272,7 +271,7 @@ class SurgerySessionRegistry:
state=st,
item_id=item_id,
item_name=label,
doctor_id=self._s.video_voice_confirm_doctor_id,
doctor_id=bp.VIDEO_VOICE_CONFIRM_DOCTOR_ID,
source="voice",
)
self._finalize_voice_confirmed_consumption_log(
@@ -305,9 +304,9 @@ class SurgerySessionRegistry:
confirmation_id=confirmation_id,
name_to_code=state.name_to_code,
chosen_label=cl,
doctor_id=self._s.video_voice_confirm_doctor_id,
doctor_id=bp.VIDEO_VOICE_CONFIRM_DOCTOR_ID,
wall_epoch=time.time(),
tsv_enabled=self._s.consumption_tsv_log_enabled,
tsv_enabled=bp.CONSUMPTION_TSV_LOG_ENABLED,
)
def _append_confirmed_detail_locked(
@@ -318,21 +317,29 @@ class SurgerySessionRegistry:
item_name: str,
doctor_id: str,
source: str,
cooldown_key: str | None = None,
detail_timestamp: datetime | None = None,
) -> None:
"""在已持有 ``state.lock`` 时追加一条消耗明细。"""
"""在已持有 ``state.lock`` 时追加一条消耗明细。
``cooldown_key``:非空时用于 `bp.VIDEO_DETAIL_COOLDOWN_SEC` 去重(例如撕段每段独立键,避免同 SKU 多段被吞)。
``detail_timestamp``:非空时写入该 UTC 时刻,否则为当前时间。
"""
dedupe = cooldown_key if cooldown_key is not None else item_id
now_m = time.monotonic()
cooldown = self._s.video_detail_cooldown_sec
prev = state.last_detail_monotonic.get(item_id)
cooldown = bp.VIDEO_DETAIL_COOLDOWN_SEC
prev = state.last_detail_monotonic.get(dedupe)
if prev is not None and (now_m - prev) < cooldown:
return
state.last_detail_monotonic[item_id] = now_m
state.last_detail_monotonic[dedupe] = now_m
ts = detail_timestamp if detail_timestamp is not None else datetime.now(timezone.utc)
state.details.append(
SurgeryConsumptionStored(
item_id=item_id,
item_name=item_name,
qty=1,
doctor_id=doctor_id,
timestamp=datetime.now(timezone.utc),
timestamp=ts,
source=source,
pending_confirmation_id=None,
)
@@ -380,6 +387,8 @@ class SurgerySessionRegistry:
item_name: str,
doctor_id: str,
source: str,
cooldown_key: str | None = None,
detail_timestamp: datetime | None = None,
) -> None:
async with state.lock:
self._append_confirmed_detail_locked(
@@ -388,6 +397,8 @@ class SurgerySessionRegistry:
item_name=item_name,
doctor_id=doctor_id,
source=source,
cooldown_key=cooldown_key,
detail_timestamp=detail_timestamp,
)
async def enqueue_pending_confirmation(
@@ -403,7 +414,7 @@ class SurgerySessionRegistry:
if not opts:
return None
now_m = time.monotonic()
cooldown = self._s.video_detail_cooldown_sec
cooldown = bp.VIDEO_DETAIL_COOLDOWN_SEC
dedupe_key = f"pending_confirm:{top_key}:{opts[0][0]}"
async with state.lock:
prev = state.last_detail_monotonic.get(dedupe_key)

View File

@@ -16,7 +16,7 @@ from typing import Awaitable, Callable
from loguru import logger
from app.config import Settings
from app.baked import pipeline as bp
from app.services.video.rtsp_capture import RtspCapture
@@ -38,12 +38,10 @@ class CameraStreamWorker:
def __init__(
self,
*,
settings: Settings,
surgery_id: str,
camera_id: str,
url: str,
) -> None:
self._s = settings
self._surgery_id = surgery_id
self._camera_id = camera_id
self._url = url
@@ -65,7 +63,7 @@ class CameraStreamWorker:
if cap is None:
try:
cap = RtspCapture(
self._url, open_timeout_sec=self._s.video_open_timeout_sec
self._url, open_timeout_sec=bp.VIDEO_OPEN_TIMEOUT_SEC
)
await asyncio.to_thread(cap.open)
consecutive_failures = 0
@@ -89,7 +87,7 @@ class CameraStreamWorker:
if cap is not None:
await asyncio.to_thread(cap.release)
cap = None
await asyncio.sleep(self._s.video_reconnect_backoff_seconds)
await asyncio.sleep(bp.VIDEO_RECONNECT_BACKOFF_SECONDS)
continue
ok, frame = await asyncio.to_thread(cap.read)
@@ -97,7 +95,7 @@ class CameraStreamWorker:
consecutive_failures += 1
if (
consecutive_failures
>= self._s.video_read_failure_reconnect_threshold
>= bp.VIDEO_READ_FAILURE_RECONNECT_THRESHOLD
):
logger.warning(
"RTSP reconnect camera={} surgery={} url={} after {} read failures",
@@ -109,7 +107,7 @@ class CameraStreamWorker:
await asyncio.to_thread(cap.release)
cap = None
consecutive_failures = 0
await asyncio.sleep(self._s.video_reconnect_backoff_seconds)
await asyncio.sleep(bp.VIDEO_RECONNECT_BACKOFF_SECONDS)
else:
await asyncio.sleep(0.05)
continue