VisionEye View Object Mapping using Ultralytics YOLO11 🚀

What is VisionEye Object Mapping?

Ultralytics YOLO11 VisionEye offers the capability for computers to identify and pinpoint objects, simulating the observational precision of the human eye. This functionality enables computers to discern and focus on specific objects, much like the way the human eye observes details from a particular viewpoint.
Samples

| VisionEye View | VisionEye View with Object Tracking | VisionEye View with Distance Calculation |
|---|---|---|
| VisionEye View Object Mapping using Ultralytics YOLO11 | VisionEye View Object Mapping with Object Tracking using Ultralytics YOLO11 | VisionEye View with Distance Calculation using Ultralytics YOLO11 |

VisionEye Object Mapping using YOLO11
import cv2

from ultralytics import YOLO
from ultralytics.utils.plotting import Annotator, colors

model = YOLO("yolo11n.pt")
names = model.model.names  # class-index -> class-name mapping used for box labels

cap = cv2.VideoCapture("path/to/video/file.mp4")
# Fail fast on a bad path: without this, cap.read() returns False immediately and the
# loop prints the misleading "successfully completed" message; fps would also be 0,
# producing an unplayable VideoWriter output.
assert cap.isOpened(), "Error reading video file"

w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
out = cv2.VideoWriter("visioneye-pinpoint.avi", cv2.VideoWriter_fourcc(*"MJPG"), fps, (w, h))

# Anchor point for the VisionEye sight lines (just outside the bottom-left corner).
center_point = (-10, h)

while True:
    ret, im0 = cap.read()
    if not ret:
        print("Video frame is empty or video processing has been successfully completed.")
        break

    # Run detection on the current frame; take boxes and class ids from the first result.
    results = model.predict(im0)
    boxes = results[0].boxes.xyxy.cpu()
    clss = results[0].boxes.cls.cpu().tolist()

    annotator = Annotator(im0, line_width=2)

    for box, cls in zip(boxes, clss):
        annotator.box_label(box, label=names[int(cls)], color=colors(int(cls)))
        annotator.visioneye(box, center_point)  # draw sight line from center_point to the box

    out.write(im0)
    cv2.imshow("visioneye-pinpoint", im0)

    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

out.release()
cap.release()
cv2.destroyAllWindows()
import cv2

from ultralytics import YOLO
from ultralytics.utils.plotting import Annotator, colors

model = YOLO("yolo11n.pt")

cap = cv2.VideoCapture("path/to/video/file.mp4")
# Fail fast on a bad path instead of looping on empty frames with fps == 0.
assert cap.isOpened(), "Error reading video file"

w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
out = cv2.VideoWriter("visioneye-pinpoint.avi", cv2.VideoWriter_fourcc(*"MJPG"), fps, (w, h))

# Anchor point for the VisionEye sight lines (just outside the bottom-left corner).
center_point = (-10, h)

while True:
    ret, im0 = cap.read()
    if not ret:
        print("Video frame is empty or video processing has been successfully completed.")
        break

    annotator = Annotator(im0, line_width=2)

    # persist=True keeps tracker state between frames so ids stay stable.
    results = model.track(im0, persist=True)

    # Tracks may be absent on early frames; only extract tensors when ids exist
    # (the original copied boxes to CPU unconditionally and then never used them).
    if results[0].boxes.id is not None:
        boxes = results[0].boxes.xyxy.cpu()
        track_ids = results[0].boxes.id.int().cpu().tolist()

        for box, track_id in zip(boxes, track_ids):
            annotator.box_label(box, label=str(track_id), color=colors(int(track_id)))
            annotator.visioneye(box, center_point)

    out.write(im0)
    cv2.imshow("visioneye-pinpoint", im0)

    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

out.release()
cap.release()
cv2.destroyAllWindows()
import math

import cv2

from ultralytics import YOLO
from ultralytics.utils.plotting import Annotator

model = YOLO("yolo11n.pt")

# Path normalized to lowercase "path/..." for consistency with the sibling examples.
cap = cv2.VideoCapture("path/to/video/file.mp4")
# Fail fast on a bad path instead of looping on empty frames with fps == 0.
assert cap.isOpened(), "Error reading video file"

w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
out = cv2.VideoWriter("visioneye-distance-calculation.avi", cv2.VideoWriter_fourcc(*"MJPG"), fps, (w, h))

center_point = (0, h)  # Distance is measured from the bottom-left corner of the frame
pixel_per_meter = 10  # Rough pixels-to-meters conversion; calibrate per camera setup

txt_color, txt_background, bbox_clr = ((0, 0, 0), (255, 255, 255), (255, 0, 255))

while True:
    ret, im0 = cap.read()
    if not ret:
        print("Video frame is empty or video processing has been successfully completed.")
        break

    annotator = Annotator(im0, line_width=2)

    # persist=True keeps tracker state between frames so ids stay stable.
    results = model.track(im0, persist=True)
    boxes = results[0].boxes.xyxy.cpu()

    if results[0].boxes.id is not None:
        track_ids = results[0].boxes.id.int().cpu().tolist()

        for box, track_id in zip(boxes, track_ids):
            annotator.box_label(box, label=str(track_id), color=bbox_clr)
            annotator.visioneye(box, center_point)

            x1, y1 = int((box[0] + box[2]) // 2), int((box[1] + box[3]) // 2)  # Bounding box centroid

            # Euclidean pixel distance from the anchor point, scaled to meters.
            distance = (math.sqrt((x1 - center_point[0]) ** 2 + (y1 - center_point[1]) ** 2)) / pixel_per_meter

            # Draw a filled background rectangle so the distance text stays readable.
            text_size, _ = cv2.getTextSize(f"Distance: {distance:.2f} m", cv2.FONT_HERSHEY_SIMPLEX, 1.2, 3)
            cv2.rectangle(im0, (x1, y1 - text_size[1] - 10), (x1 + text_size[0] + 10, y1), txt_background, -1)
            cv2.putText(im0, f"Distance: {distance:.2f} m", (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 1.2, txt_color, 3)

    out.write(im0)
    cv2.imshow("visioneye-distance-calculation", im0)

    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

out.release()
cap.release()
cv2.destroyAllWindows()
`visioneye` Arguments

| Name | Type | Default | Description |
|---|---|---|---|
| `color` | `tuple` | `(235, 219, 11)` | Line and object centroid color |
| `pin_color` | `tuple` | `(255, 0, 255)` | VisionEye pinpoint color |
Note

For any inquiries, feel free to post your questions in the Ultralytics Issue Section or the discussion section mentioned below.
์์ฃผ ๋ฌป๋ ์ง๋ฌธ
VisionEye ๊ฐ์ฒด ๋งคํ์ ์ฌ์ฉํ๋ ค๋ฉด ์ด๋ป๊ฒ ์์ํ๋์ Ultralytics YOLO11 ?
Ultralytics YOLO11 ์์ VisionEye ๊ฐ์ฒด ๋งคํ์ ์ฌ์ฉํ๋ ค๋ฉด ๋จผ์ pip๋ฅผ ํตํด Ultralytics YOLO ํจํค์ง๋ฅผ ์ค์นํด์ผ ํฉ๋๋ค. ๊ทธ๋ฐ ๋ค์ ์ค๋ช ์์ ์ ๊ณต๋ ์ํ ์ฝ๋๋ฅผ ์ฌ์ฉํ์ฌ VisionEye๋ก ๊ฐ์ฒด ๊ฐ์ง๋ฅผ ์ค์ ํ ์ ์์ต๋๋ค. ๋ค์์ ์์ํ๋ ๋ฐ ๋์์ด ๋๋ ๊ฐ๋จํ ์์ ์ ๋๋ค:
import cv2

from ultralytics import YOLO

# Minimal VisionEye starter: run YOLO11 detection on each video frame.
model = YOLO("yolo11n.pt")
cap = cv2.VideoCapture("path/to/video/file.mp4")

while True:
    success, im0 = cap.read()
    if not success:  # end of stream (or unreadable file)
        break

    for result in model.predict(im0):
        # Perform custom logic with result
        pass

    cv2.imshow("visioneye", im0)
    if cv2.waitKey(1) & 0xFF == ord("q"):  # press 'q' to quit early
        break

cap.release()
cv2.destroyAllWindows()
Ultralytics YOLO11 ์ ์ฌ์ฉํ๋ VisionEye์ ๊ฐ์ฒด ์ถ์ ๊ธฐ๋ฅ์ ์ฃผ์ ๊ธฐ๋ฅ์ ๋ฌด์์ธ๊ฐ์?
VisionEye์ ๊ฐ์ฒด ์ถ์ ( Ultralytics YOLO11 )์ ํตํด ์ฌ์ฉ์๋ ๋น๋์ค ํ๋ ์ ๋ด์์ ๊ฐ์ฒด์ ์์ง์์ ์ถ์ ํ ์ ์์ต๋๋ค. ์ฃผ์ ๊ธฐ๋ฅ์ ๋ค์๊ณผ ๊ฐ์ต๋๋ค:
- ์ค์๊ฐ ๊ฐ์ฒด ์ถ์ : ๋ฌผ์ฒด๊ฐ ์์ง์ผ ๋ ๋ฐ๋ผ์ก์ต๋๋ค.
- ๊ฐ์ฒด ์๋ณ: YOLO11 ์ ๊ฐ๋ ฅํ ํ์ง ์๊ณ ๋ฆฌ์ฆ์ ํ์ฉํฉ๋๋ค.
- ๊ฑฐ๋ฆฌ ๊ณ์ฐ: ๊ฐ์ฒด์ ์ง์ ๋ ์ง์ ์ฌ์ด์ ๊ฑฐ๋ฆฌ๋ฅผ ๊ณ์ฐํฉ๋๋ค.
- ์ฃผ์ ๋ฐ ์๊ฐํ: ์ถ์ ๋ ๊ฐ์ฒด์ ๋ํ ์๊ฐ์ ๋ง์ปค๋ฅผ ์ ๊ณตํฉ๋๋ค.
๋ค์์ VisionEye๋ฅผ ์ฌ์ฉํ ํธ๋ํน์ ๋ณด์ฌ์ฃผ๋ ๊ฐ๋จํ ์ฝ๋ ์ค๋ํซ์ ๋๋ค:
import cv2

from ultralytics import YOLO

# Minimal VisionEye tracking loop: persist=True keeps track ids stable across frames.
model = YOLO("yolo11n.pt")
cap = cv2.VideoCapture("path/to/video/file.mp4")

while True:
    success, im0 = cap.read()
    if not success:  # end of stream (or unreadable file)
        break

    for result in model.track(im0, persist=True):
        # Annotate and visualize tracking
        pass

    cv2.imshow("visioneye-tracking", im0)
    if cv2.waitKey(1) & 0xFF == ord("q"):  # press 'q' to quit early
        break

cap.release()
cv2.destroyAllWindows()
์ข ํฉ์ ์ธ ๊ฐ์ด๋๋ ๊ฐ์ฒด ์ถ์ ์ ์ฌ์ฉํ VisionEye ๊ฐ์ฒด ๋งคํ์ ์ฐธ์กฐํ์ธ์.
VisionEye์ YOLO11 ๋ชจ๋ธ๋ก ๊ฑฐ๋ฆฌ๋ฅผ ๊ณ์ฐํ๋ ค๋ฉด ์ด๋ป๊ฒ ํด์ผ ํ๋์?
VisionEye ๋ฐ Ultralytics YOLO11 ์ ์ฌ์ฉํ ๊ฑฐ๋ฆฌ ๊ณ์ฐ์๋ ํ๋ ์์ ์ง์ ๋ ์ง์ ์์ ๊ฐ์ง๋ ๋ฌผ์ฒด์ ๊ฑฐ๋ฆฌ๋ฅผ ๊ฒฐ์ ํ๋ ๊ฒ์ด ํฌํจ๋ฉ๋๋ค. ์ด๋ ๊ณต๊ฐ ๋ถ์ ๊ธฐ๋ฅ์ ํฅ์์์ผ ์์จ ์ฃผํ ๋ฐ ๊ฐ์์ ๊ฐ์ ์ ํ๋ฆฌ์ผ์ด์ ์ ์ ์ฉํฉ๋๋ค.
๋ค์์ ๊ฐ๋จํ ์์ ๋๋ค:
import math

import cv2

from ultralytics import YOLO

model = YOLO("yolo11n.pt")
cap = cv2.VideoCapture("path/to/video/file.mp4")

center_point = (0, 480)  # Example anchor point (bottom-left of a 480px-tall frame)
pixel_per_meter = 10  # Rough pixels-to-meters conversion; calibrate per camera setup

while True:
    ret, frame = cap.read()
    if not ret:
        break

    results = model.track(frame, persist=True)

    for result in results:
        # Distance from the anchor point to each box's top-left corner, in meters.
        # BUG FIX: the original iterated `results` (a list of Results objects) as if
        # it were a list of boxes, so `box[0]` indexed a Results object; iterate the
        # result's xyxy box tensor instead.
        distances = [
            math.sqrt((box[0] - center_point[0]) ** 2 + (box[1] - center_point[1]) ** 2) / pixel_per_meter
            for box in result.boxes.xyxy
        ]

    cv2.imshow("visioneye-distance", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
์์ธํ ์ง์นจ์ ๊ฑฐ๋ฆฌ ๊ณ์ฐ ๊ธฐ๋ฅ์ด ์๋ VisionEye๋ฅผ ์ฐธ์กฐํ์ธ์.
๊ฐ์ฒด ๋งคํ ๋ฐ ์ถ์ ์ Ultralytics YOLO11 ์ ์ฌ์ฉํด์ผ ํ๋ ์ด์ ๋ ๋ฌด์์ธ๊ฐ์?
Ultralytics YOLO11 ๋ ์๋, ์ ํ์ฑ, ํตํฉ ์ฉ์ด์ฑ์ผ๋ก ์ ์๋ ค์ ธ ์์ด ๊ฐ์ฒด ๋งคํ ๋ฐ ์ถ์ ์ ์ต๊ณ ์ ์ ํ์ ๋๋ค. ์ฃผ์ ์ฅ์ ์ ๋ค์๊ณผ ๊ฐ์ต๋๋ค:
- ์ต์ฒจ๋จ ์ฑ๋ฅ: ์ค์๊ฐ ๋ฌผ์ฒด ๊ฐ์ง์์ ๋์ ์ ํ๋๋ฅผ ์ ๊ณตํฉ๋๋ค.
- ์ ์ฐ์ฑ: ๊ฐ์ง, ์ถ์ , ๊ฑฐ๋ฆฌ ๊ณ์ฐ ๋ฑ ๋ค์ํ ์์ ์ ์ง์ํฉ๋๋ค.
- ์ปค๋ฎค๋ํฐ ๋ฐ ์ง์: ๋ฌธ์ ํด๊ฒฐ ๋ฐ ๊ฐ์ ์ ์ํ ๊ด๋ฒ์ํ ๋ฌธ์์ ํ๋ฐํ GitHub ์ปค๋ฎค๋ํฐ.
- ์ฌ์ฉ ํธ์์ฑ: ์ง๊ด์ ์ธ API๋ก ๋ณต์กํ ์์ ์ ๊ฐ์ํํ์ฌ ์ ์ํ ๋ฐฐํฌ์ ๋ฐ๋ณต์ด ๊ฐ๋ฅํฉ๋๋ค.
์ ์ฒญ ๋ฐ ํํ์ ๋ํ ์์ธํ ๋ด์ฉ์ Ultralytics YOLO11 ๋ฌธ์๋ฅผ ์ฐธ์กฐํ์ธ์.
VisionEye๋ฅผ Comet ๋๋ ClearML ๊ณผ ๊ฐ์ ๋ค๋ฅธ ๋จธ์ ๋ฌ๋ ๋๊ตฌ์ ํตํฉํ๋ ค๋ฉด ์ด๋ป๊ฒ ํด์ผ ํ๋์?
Ultralytics YOLO11 ๋ Comet ๋ฐ ClearML ๊ณผ ๊ฐ์ ๋ค์ํ ๋จธ์ ๋ฌ๋ ๋๊ตฌ์ ์ํํ๊ฒ ํตํฉ๋์ด ์คํ ์ถ์ , ํ์ ๋ฐ ์ฌํ์ฑ์ ํฅ์์ํฌ ์ ์์ต๋๋ค. YOLOv5 ์ Comet ๊ณผ ์ฌ์ฉํ๋ ๋ฐฉ๋ฒ๊ณผ YOLO11 ์ ClearML ๊ณผ ํตํฉํ๋ ๋ฐฉ๋ฒ์ ๋ํ ์์ธํ ๊ฐ์ด๋๋ฅผ ์ฐธ์กฐํ์ฌ ์์ํ์ธ์.
์์ธํ ํ์ ๋ฐ ํตํฉ ์์๋ Ultralytics ํตํฉ ๊ฐ์ด๋๋ฅผ ์ฐธ์กฐํ์ธ์.