```python
import cv2
import os
import csv
from ultralytics import YOLO
from collections import defaultdict
import numpy as np
from datetime import datetime
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
import threading
import gc

# Run garbage collection (intended to be called periodically)
gc.collect()

# Area settings
multiplier = 1

# Camera 1 area settings [(top right), (bottom right), (bottom left), (top left)]
area1_list = [(490, 180), (580, 280), (370, 500), (230, 420)]
area2_list = [(350, 50), (470, 140), (220, 380), (100, 260)]
area1 = [(x * multiplier, y * multiplier) for x, y in area1_list]
area2 = [(x * multiplier, y * multiplier) for x, y in area2_list]

polygon1 = Polygon(area1)
polygon2 = Polygon(area2)

# Camera 2 area settings [(top right), (bottom right), (bottom left), (top left)]
area3_list = [(450, 220), (580, 350), (390, 570), (170, 460)]
area4_list = [(330, 50), (400, 200), (140, 430), (20, 300)]
area3 = [(x * multiplier, y * multiplier) for x, y in area3_list]
area4 = [(x * multiplier, y * multiplier) for x, y in area4_list]

polygon3 = Polygon(area3)
polygon4 = Polygon(area4)

# Camera 3 area settings [(top right), (bottom right), (bottom left), (top left)]
area5_list = [(450, 220), (580, 350), (390, 570), (170, 460)]
area6_list = [(330, 50), (400, 200), (140, 430), (20, 300)]
area5 = [(x * multiplier, y * multiplier) for x, y in area5_list]
area6 = [(x * multiplier, y * multiplier) for x, y in area6_list]

polygon5 = Polygon(area5)
polygon6 = Polygon(area6)

# Create the output folder for the CSV files
output_folder = "./count"
os.makedirs(output_folder, exist_ok=True)

# Path of the overall-count CSV file
csv_file_path = "./count/overall_count.csv"

# CSV header
csv_header = ["Camera", "count", "area1_count", "area2_count", "Time"]

# Open the CSV file and write the header
with open(csv_file_path, mode='w', newline='') as csv_file:
    csv_writer = csv.writer(csv_file)
    csv_writer.writerow(csv_header)

# Load the model
model = YOLO('./best.pt')

# Camera information
camera_sources = [
    ("Camera1", "rtsp://root:pass1111@192.xxx.xx.xxx/axis-media/media.amp"),
    ("Camera2", "rtsp://root:pass1111@192.xxx.xx.xxx/axis-media/media.amp"),
    ("Camera3", "rtsp://root:pass1111@192.xxx.xx.xxx/axis-media/media.amp")
]

# Area settings per camera
camera_areas = {
    "Camera1": [polygon1, polygon2],
    "Camera2": [polygon3, polygon4],
    "Camera3": [polygon5, polygon6]
}

# Window sizes
window_sizes = {
    "Camera1": (640, 640),
    "Camera2": (640, 640),
    "Camera3": (640, 640)
}

# Font settings
font = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 1.0
font_color = (255, 255, 255)
font_thickness = 2

def process_camera(camera_name, camera_source):
    cap = cv2.VideoCapture(camera_source)
    track_id_to_csv = {}  # dictionary keeping a CSV file path per track ID
    frame_count = 0

    # Create the window and set its size
    cv2.namedWindow(f"{camera_name} YOLOv8 Tracking", cv2.WINDOW_NORMAL)
    cv2.resizeWindow(f"{camera_name} YOLOv8 Tracking", *window_sizes[camera_name])

    # Select the areas for this camera
    if camera_name == "Camera1":
        polygon1_camera = polygon1
        polygon2_camera = polygon2
    elif camera_name == "Camera2":
        polygon1_camera = polygon3
        polygon2_camera = polygon4
    elif camera_name == "Camera3":
        polygon1_camera = polygon5
        polygon2_camera = polygon6
    else:
        polygon1_camera = None
        polygon2_camera = None

    while cap.isOpened():
        success, frame = cap.read()
        if success:
            frame_count += 1
            results = model.track(frame, persist=True, conf=0.2, device='0', tracker="bytetrack.yaml")

            if results[0].boxes.id is None:
                continue

            boxes = results[0].boxes.xywh.cpu().numpy()
            track_ids = results[0].boxes.id.cpu().numpy().astype(int)
            count = len(boxes)

            annotated_frame = results[0].plot()
            if camera_name == "Camera1":
                cv2.polylines(annotated_frame, [np.array(area1)], isClosed=True, color=(0, 0, 255), thickness=2)
                cv2.polylines(annotated_frame, [np.array(area2)], isClosed=True, color=(255, 0, 0), thickness=2)
            elif camera_name == "Camera2":
                cv2.polylines(annotated_frame, [np.array(area3)], isClosed=True, color=(0, 0, 255), thickness=2)
                cv2.polylines(annotated_frame, [np.array(area4)], isClosed=True, color=(255, 0, 0), thickness=2)
            elif camera_name == "Camera3":
                cv2.polylines(annotated_frame, [np.array(area5)], isClosed=True, color=(0, 0, 255), thickness=2)
                cv2.polylines(annotated_frame, [np.array(area6)], isClosed=True, color=(255, 0, 0), thickness=2)

            # Reset the per-frame area counters
            area_count1 = 0
            area_count2 = 0

            for box, track_id in zip(boxes, track_ids):
                x, y, w, h = box
                point = Point(float(x), float(y))

                # Manage one CSV file per track ID
                if track_id not in track_id_to_csv:
                    csv_file = os.path.join(output_folder, f"{camera_name}_track_{track_id}.csv")
                    with open(csv_file, 'w', newline='') as csvfile:
                        csv_writer = csv.writer(csvfile)
                        csv_writer.writerow(["X", "Y", "area", "Time"])  # write the header
                    track_id_to_csv[track_id] = csv_file

                if polygon1_camera.contains(point):  # areas differ per camera: a person is inside area1
                    area_count1 += 1
                    if int(frame_count % cap.get(cv2.CAP_PROP_FPS)) == 0:  # record each ID's position once per second
                        with open(track_id_to_csv[track_id], 'a', newline='') as csvfile:
                            csv_writer = csv.writer(csvfile)
                            current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                            csv_writer.writerow([x, y, "1", current_time])

                if polygon2_camera.contains(point):  # areas differ per camera: a person is inside area2
                    area_count2 += 1
                    if int(frame_count % cap.get(cv2.CAP_PROP_FPS)) == 0:  # record each ID's position once per second
                        with open(track_id_to_csv[track_id], 'a', newline='') as csvfile:
                            csv_writer = csv.writer(csvfile)
                            current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                            csv_writer.writerow([x, y, "2", current_time])

            # Get the current time
            current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

            if int(frame_count % cap.get(cv2.CAP_PROP_FPS)) == 0:  # record each area's head count once per second
                # Append the data to the CSV file
                with open(csv_file_path, mode='a', newline='') as csv_file:
                    csv_writer = csv.writer(csv_file)
                    csv_writer.writerow([camera_name, count, area_count1, area_count2, current_time])

            # Overlay the counts with the shared font settings
            cv2.putText(annotated_frame, f"count: {count}", (10, 30), font, font_scale, font_color, font_thickness)
            cv2.putText(annotated_frame, f"area1: {area_count1}", (10, 90), font, font_scale, (0, 0, 255), font_thickness)
            cv2.putText(annotated_frame, f"area2: {area_count2}", (10, 150), font, font_scale, (255, 0, 0), font_thickness)
            cv2.putText(annotated_frame, current_time, (450, 620), font, 0.5, (0, 0, 255), 2)

            cv2.imshow(f"{camera_name} YOLOv8 Tracking", annotated_frame)

            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
        else:
            break

    cap.release()
    cv2.destroyAllWindows()

threads = []

# Process each camera in a separate thread
for camera_name, camera_source in camera_sources:
    thread = threading.Thread(target=process_camera, args=(camera_name, camera_source))
    thread.start()
    threads.append(thread)

# Wait for all threads to finish
for thread in threads:
    thread.join()
```