新增推理视频流的逻辑

This commit is contained in:
2025-03-14 09:35:44 +08:00
parent 9d369b9898
commit 8478b4a102
2 changed files with 16 additions and 29 deletions

View File

@@ -122,7 +122,7 @@ def run_detect_yolo(detect_log_in: ProjectDetectLogIn, session: Session = Depend
 return rc.response_error("推理集合中没有内容,请先到推理集合中上传图片")
 if detect.file_type == 'img' or detect.file_type == 'video':
 detect_log = pds.run_detect_yolo(detect_log_in, detect, train, session)
-thread_train = threading.Thread(target=run_event_loop,
+thread_train = threading.Thread(target=run_img_loop,
 args=(detect_log.pt_url, detect_log.folder_url, detect_log.detect_folder_url,
 detect_log.detect_version, detect_log.id, detect_log.detect_id, session,))
 thread_train.start()
@@ -137,11 +137,20 @@ def run_detect_yolo(detect_log_in: ProjectDetectLogIn, session: Session = Depend
 return rc.response_success(msg="执行成功")
-def run_event_loop(weights: str, source: str, project: str, name: str, log_id: int, detect_id: int, session: Session):
+def run_img_loop(weights: str, source: str, project: str, name: str, log_id: int, detect_id: int, session: Session):
 loop = asyncio.new_event_loop()
 asyncio.set_event_loop(loop)
 # 运行异步函数
-loop.run_until_complete(pds.run_commend(weights, source, project, name, log_id, detect_id, session))
+loop.run_until_complete(pds.run_detect_img(weights, source, project, name, log_id, detect_id, session))
+# 可选: 关闭循环
+loop.close()
+def run_rtsp_loop(weights_pt: str, rtsp_url: str, data: str, detect_id: int):
+loop = asyncio.new_event_loop()
+asyncio.set_event_loop(loop)
+# 运行异步函数
+loop.run_until_complete(pds.run_detect_rtsp(weights_pt, rtsp_url, data, detect_id,))
 # 可选: 关闭循环
 loop.close()

View File

@@ -128,8 +128,7 @@ def run_detect_yolo(detect_in: ProjectDetectLogIn, detect: ProjectDetect, train:
 return detect_log
-async def run_commend(weights: str, source: str, project: str, name: str,
-log_id: int, detect_id: int, session: Session):
+async def run_detect_img(weights: str, source: str, project: str, name: str, log_id: int, detect_id: int, session: Session):
 """
 执行yolov5的推理
 :param weights: 权重文件
@@ -180,9 +179,10 @@ async def run_commend(weights: str, source: str, project: str, name: str,
 pdc.add_detect_imgs(detect_log_imgs, session)
-def run_detect_rtsp(weights_pt: str, rtsp_url: str, data: str):
+async def run_detect_rtsp(weights_pt: str, rtsp_url: str, data: str, detect_id: int):
 """
 rtsp 视频流推理
+:param detect_id: 训练集的id
 :param weights_pt: 权重文件
 :param rtsp_url: 视频流地址
 :param data: yaml文件
@@ -233,43 +233,21 @@ def run_detect_rtsp(weights_pt: str, rtsp_url: str, data: str):
 # Process predictions
 for i, det in enumerate(pred): # per image
-seen += 1
 p, im0, frame = path[i], im0s[i].copy(), dataset.count
-s += f"{i}: "
-p = Path(p) # to Path
-s += "{:g}x{:g} ".format(*im.shape[2:]) # print string
-gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
-imc = im0.copy() if False else im0 # for save_crop
 annotator = Annotator(im0, line_width=3, example=str(names))
 if len(det):
 # Rescale boxes from img_size to im0 size
 det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
-# Print results
-for c in det[:, 5].unique():
-n = (det[:, 5] == c).sum() # detections per class
-s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
 # Write results
 for *xyxy, conf, cls in reversed(det):
-c = int(cls) # integer class
-label = names[c] if False else f"{names[c]}"
-confidence = float(conf)
-confidence_str = f"{confidence:.2f}"
 c = int(cls) # integer class
 label = None if False else (names[c] if False else f"{names[c]} {conf:.2f}")
 annotator.box_label(xyxy, label, color=colors(c, True))
 # Stream results
 im0 = annotator.result()
-if platform.system() == "Linux" and p not in windows:
-windows.append(p)
-cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux)
-cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
-cv2.imshow(str(p), im0)
-cv2.waitKey(1) # 1 millisecond