-
Good day. I'm trying to do the following: The frame is divided into zones, and a separate person enters each zone. I encountered a problem that since this is detection, if a person twitches in the frame, the system loses him and all counters are restarted despite the fact that the character did not leave the zone. As far as I understand, I need to enter tracking. The bytetrack is not suitable because it does not match the version of Torch I am using. I found out that Yolo has native tracking, but still didn’t understand how to use it. How can I implement yolo tracking into my script? I need an trackerID to work with time and track the movement of an object. Thank you in advance. import streamlit as st
import cv2
import numpy as np
import supervision as sv
from ultralytics import YOLO
import queue
import threading
import time
def format_time(seconds):
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return f"{int(hours):02}:{int(minutes):02}:{int(seconds):02}"
class CountObject():
def __init__(self, rtsp_url, frame_resolution_wh=(2560, 1440)):
self.model = YOLO('yolov8s.pt')
self.model.predict(source="0", show=False, stream=True, classes=0, conf=0.9)
self.colors = sv.ColorPalette.default()
self.polygons = np.array([
[[0, 0], [0.4, 0], [0.4, 1], [0, 1]],
[[0.6, 0], [1, 0], [1, 1], [0.6, 1]]
])
self.polygons = (self.polygons * np.array(frame_resolution_wh)).astype(int)
self.zones = [sv.PolygonZone(polygon=polygon, frame_resolution_wh=frame_resolution_wh) for polygon in self.polygons]
self.zone_annotators = [sv.PolygonZoneAnnotator(zone=zone, color=self.colors.by_idx(index), thickness=6, text_thickness=8, text_scale=4) for index, zone in enumerate(self.zones)]
self.box_annotators = [sv.BoxAnnotator(color=self.colors.by_idx(index), thickness=4, text_thickness=4, text_scale=2) for index in range(len(self.polygons))]
self.enter_times = {zone_index: {} for zone_index in range(len(self.zones))}
def process_frame(self, frame):
current_time = time.time()
results = self.model(frame, imgsz=1280)[0]
detections = sv.Detections.from_yolov8(results)
detections = detections[(detections.class_id == 0) & (detections.confidence > 0.5)]
annotated_frame = frame.copy()
for zone_index, (zone, zone_annotator, box_annotator) in enumerate(zip(self.zones, self.zone_annotators, self.box_annotators)):
mask = zone.trigger(detections=detections)
detections_filtered = detections[mask]
labels = []
objects_in_zone = {}
for i, detection in enumerate(detections_filtered):
object_id = f'# {i+1}'
if object_id not in self.enter_times[zone_index]:
self.enter_times[zone_index][object_id] = current_time
time_in_zone = current_time - self.enter_times[zone_index][object_id]
formatted_time = format_time(time_in_zone)
labels.append(f'{object_id} - {formatted_time}')
objects_in_zone[object_id] = True
keys_to_remove = []
for object_id in self.enter_times[zone_index].keys():
if object_id not in objects_in_zone:
keys_to_remove.append(object_id)
for object_id in keys_to_remove:
del self.enter_times[zone_index][object_id]
annotated_frame = box_annotator.annotate(scene=annotated_frame, detections=detections_filtered, labels=labels, skip_label=False)
annotated_frame = zone_annotator.annotate(scene=annotated_frame)
return annotated_frame
class VideoCapture:
...
def main():
st.title("Object Detection and Tracking")
rtsp_url = ''
cap = VideoCapture(rtsp_url)
count_object_instance = CountObject(rtsp_url)
st_frame = st.empty()
while True:
frame = cap.read()
frame = count_object_instance.process_frame(frame)
st_frame.image(frame, channels="BGR")
if __name__ == "__main__":
main() |
Beta Was this translation helpful? Give feedback.
Replies: 2 comments 1 reply
-
hello, please find in the following link an example @SkalskiP did a long time ago implementing yolov8 native tracking https://github.com/SkalskiP/yolov8-native-tracking/blob/master/main.py below a sniped of the relevant changes you might need to implement , for result in model.track(source=video_source, show=False, stream=True,save=False, agnostic_nms=True, iou=0.7,conf=0.4): hope it helps |
Beta Was this translation helpful? Give feedback.
-
Hi, @epigraphe and @maddust 👋🏻 I sped some time working on the "How to: Track Objects" docs. I hope you will find it helpful! https://supervision.roboflow.com/how_to/track_objects/ |
Beta Was this translation helpful? Give feedback.
Hi, @epigraphe and @maddust 👋🏻 I sped some time working on the "How to: Track Objects" docs. I hope you will find it helpful! https://supervision.roboflow.com/how_to/track_objects/