Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Refactored Automated Monitoring Script w/PJ #3

Open
wants to merge 6 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
45 changes: 45 additions & 0 deletions LED.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
import RPi.GPIO as GPIO
import time

def run_LEDS(uv_warmup_s=5 * 60, white_on_s=600):
    """Run one UV + white LED attraction cycle.

    Switches both LEDs off, lights the UV LED alone for *uv_warmup_s*
    seconds to attract insects, then adds the white LED for *white_on_s*
    seconds, and finally switches everything off again.

    Args:
        uv_warmup_s: seconds the UV LED runs alone (default 300 s,
            matching the original hard-coded behavior).
        white_on_s: seconds both LEDs stay on together (default 600 s,
            matching the original hard-coded behavior).
    """
    # Reset any channels left configured by a previous run BEFORE setting
    # the mode: RPi.GPIO's cleanup() also resets the pin-numbering mode,
    # so calling it after setmode() can leave the library without a mode.
    GPIO.setwarnings(False)
    GPIO.cleanup()
    GPIO.setmode(GPIO.BCM)

    # BCM pin assignments for the two LEDs
    UV_LED_PIN = 22     # UV light
    WHITE_LED_PIN = 17  # White light

    # Configure both pins as outputs
    GPIO.setup(UV_LED_PIN, GPIO.OUT)
    GPIO.setup(WHITE_LED_PIN, GPIO.OUT)

    try:
        # Start from a known-off state
        GPIO.output(UV_LED_PIN, GPIO.LOW)
        GPIO.output(WHITE_LED_PIN, GPIO.LOW)
        print("LEDs OFF")

        time.sleep(1)

        # UV-only attraction phase
        GPIO.output(UV_LED_PIN, GPIO.HIGH)
        print("UV LED ON - Attracting bugs")

        time.sleep(uv_warmup_s)  # default 300 s = 5 minutes

        # Add the white LED for the capture phase
        GPIO.output(WHITE_LED_PIN, GPIO.HIGH)
        print("White LED ON")

        time.sleep(white_on_s)
    finally:
        # Always leave the LEDs off, even on KeyboardInterrupt or an
        # error mid-cycle — the original left them latched on in that case.
        GPIO.output(UV_LED_PIN, GPIO.LOW)
        GPIO.output(WHITE_LED_PIN, GPIO.LOW)
        print("LEDs OFF")

if __name__ == "__main__":
    run_LEDS()
27 changes: 27 additions & 0 deletions LEDoff.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
import RPi.GPIO as GPIO
import time

def run_LEDS():
    """Switch both attraction LEDs off and idle briefly.

    Companion to LED.py: configures the same two BCM pins as outputs,
    drives them LOW, prints the status, then sleeps for five seconds.
    """
    # Same GPIO initialization sequence as the main LED script
    GPIO.setmode(GPIO.BCM)
    GPIO.setwarnings(False)
    GPIO.cleanup()

    # BCM pins in fixed order: 22 drives the UV light, 17 the white light
    led_pins = (22, 17)

    # Configure both pins as outputs
    for pin in led_pins:
        GPIO.setup(pin, GPIO.OUT)

    # Drive both LEDs off
    for pin in led_pins:
        GPIO.output(pin, GPIO.LOW)
    print("LEDs OFF")

    time.sleep(5)

if __name__ == "__main__":
    run_LEDS()
70 changes: 70 additions & 0 deletions main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
import time
import subprocess
import threading
import logging
import signal

def run_led_script():
    """Run one full LED attraction cycle via the project-local LED module.

    Any exception (including a failed import of LED) is logged and then
    re-raised so the caller's thread still observes the failure.
    """
    try:
        # Deferred import: the LED module touches GPIO hardware on load paths,
        # so it is only pulled in when a cycle actually runs.
        import LED
        logging.info("LED script running...")
        LED.run_LEDS()
        logging.info("LED script finished.")
    except Exception as err:
        logging.error(f"Error running LED script: {err}")
        raise

def capture_script():
    """Launch the YOLO tracker capture script as a background process.

    Returns:
        subprocess.Popen: handle to the running capture process (caller
        terminates it via terminate_process()).

    Raises:
        Exception: re-raised after logging if the process cannot be started.
    """
    try:
        logging.info("Capture script starting...")
        # Each argument must be its own list element: the original passed
        # "yolo_tracker_save_hqsync.py --4k" as ONE argv entry, so the
        # interpreter looked for a file literally named with the flag appended.
        # NOTE(review): refactored/argument_parser.py declares the flag as
        # "-4k" (single dash) — confirm which spelling the target script expects.
        process = subprocess.Popen(
            ["python3", "insect-detect/yolo_tracker_save_hqsync.py", "--4k"]
        )
        return process
    except Exception as e:
        logging.error(f"Error running capture script: {e}")
        raise

def terminate_process(process):
    """Stop a child process started with subprocess.Popen.

    Sends SIGTERM and waits up to 5 seconds; if the process does not exit
    in time, escalates to SIGKILL so cleanup always completes and no
    orphan keeps the camera/files locked.

    Args:
        process (subprocess.Popen): the process to stop.

    Raises:
        Exception: re-raised after logging for unexpected failures
            (not for a mere termination timeout, which is handled).
    """
    try:
        process.terminate()
        process.wait(timeout=5)
        logging.info("Capture script terminated.")
    except subprocess.TimeoutExpired:
        # The original re-raised here, leaving a live orphan process;
        # kill it instead so the monitoring loop can continue.
        logging.error("Capture script did not exit after terminate(); killing it.")
        process.kill()
        process.wait()
        logging.info("Capture script killed.")
    except Exception as e:
        logging.error(f"Error terminating capture script: {e}")
        raise

def main():
    """Run the monitoring loop for up to 2 hours.

    Each cycle: start the LED thread, wait 5 min (UV-only attraction
    phase), run the capture subprocess for 15 min, terminate it, wait for
    the LED thread to finish, then pause 10 min before the next cycle.
    NOTE(review): a full cycle is ~30 min of sleeps, so the duration check
    (done only at the top of the loop) means the last cycle may overrun
    the nominal 2-hour window — confirm that is acceptable.
    """
    logging.basicConfig(level=logging.INFO)
    start_time = time.time()
    duration = 2 * 60 * 60  # 2 hours in seconds

    while time.time() - start_time < duration:
        # Start LED script in a separate thread (runs its own timed cycle)
        led_thread = threading.Thread(target=run_led_script)
        logging.info("Starting LED script")
        led_thread.start()

        # Wait for 5 minutes before starting the capture script
        # (lets the UV-only attraction phase run first)
        time.sleep(5 * 60)

        # Start capture script in a separate process
        logging.info("Starting capture script")
        capture_process = capture_script()

        # Run capture script for 15-20 minutes
        capture_duration = 15 * 60  # 15 minutes in seconds
        time.sleep(capture_duration)

        # Terminate the capture script
        logging.info("Terminating capture script")
        terminate_process(capture_process)

        # Wait for the LED script to finish if it's still running
        led_thread.join()

        # Wait for 10 minutes break before the next cycle
        time.sleep(10 * 60)

    logging.info("Main script execution complete after 2 hours")

if __name__ == "__main__":
    main()
Binary file not shown.
Binary file added refactored/__pycache__/data_log.cpython-39.pyc
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file added refactored/__pycache__/logging_setup.cpython-39.pyc
Binary file not shown.
Binary file added refactored/__pycache__/model_config.cpython-39.pyc
Binary file not shown.
Binary file not shown.
Binary file added refactored/__pycache__/run.cpython-39.pyc
Binary file not shown.
Binary file not shown.
Binary file not shown.
25 changes: 25 additions & 0 deletions refactored/argument_parser.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
import argparse

def parse_arguments():
    """Parse command-line arguments for the detection/tracking application.

    Returns:
        argparse.Namespace with attributes: four_k_resolution (bool),
        crop_bbox ("square" | "tight", default "square"),
        save_raw_frames (bool), save_overlay_frames (bool), save_logs (bool).
    """
    parser = argparse.ArgumentParser(description="Run object detection and tracking.")
    parser.add_argument("-4k", "--four_k_resolution", action="store_true",
        help="crop detections from (+ save HQ frames in) 4K resolution; default = 1080p")
    parser.add_argument("-crop", "--crop_bbox", choices=["square", "tight"], default="square", type=str,
        help="save cropped detections with aspect ratio 1:1 ('-crop square') or \
             keep original bbox size with variable aspect ratio ('-crop tight')")
    parser.add_argument("-raw", "--save_raw_frames", action="store_true",
        help="additionally save full raw HQ frames in separate folder (e.g., for training data)")
    parser.add_argument("-overlay", "--save_overlay_frames", action="store_true",
        help="additionally save full HQ frames with overlay (bbox + info) in separate folder")
    parser.add_argument("-log", "--save_logs", action="store_true",
        help="save RPi CPU + OAK chip temperature, RPi available memory (MB) + \
             CPU utilization (%) and battery info to .csv file")

    args = parser.parse_args()

    if args.save_logs:
        # Availability check only: fail fast with an ImportError if the optional
        # logging dependencies are missing; the imported names are intentionally
        # unused here (data_log.py imports them again where they are needed).
        from apscheduler.schedulers.background import BackgroundScheduler  # noqa: F401
        from gpiozero import CPUTemperature  # noqa: F401

    # Always return the parsed namespace — previously the return statement's
    # placement made it possible for callers to receive None when -log was unset.
    return args
39 changes: 39 additions & 0 deletions refactored/capture.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
#!/usr/bin/env python3

from logging_setup import setup_logging
from argument_parser import parse_arguments
from power_management import check_system_resources
from setup_pipeline import create_pipeline
from setup_directories import setup_directories
from data_management import store_data
from run import run

# Other imports remain the same
# import csv, json, subprocess, sys, time, traceback, etc.

# Module-level state shared with the run loop:
latest_images = {}  # track.id -> path of the most recent cropped image
image_count = {}  # Dictionary to keep track of image count for each track.id
webhook_url = "https://nytelyfe-402203.uc.r.appspot.com/upload"  # Webhook URL

def capture(args):
    """Wire up logging, power checks, pipeline and directories, then start the run loop.

    Args:
        args: argparse.Namespace from parse_arguments() — reads four_k_resolution,
            crop_bbox, save_raw_frames, save_overlay_frames and save_logs.
    """

    # Setup logging first
    logger = setup_logging()

    # Check system resources and manage power
    pijuice, chargelevel_start = check_system_resources(logger)

    # Create the DepthAI pipeline
    pipeline, labels = create_pipeline(args.four_k_resolution)

    # Set up data directories
    save_path, rec_id, rec_start = setup_directories(labels, args.save_raw_frames, args.save_overlay_frames)

    # Hand everything to the main processing loop (presumably blocks until
    # the recording interval ends — confirm against run.py)
    run(args.save_logs, args.save_raw_frames, args.save_overlay_frames, args.crop_bbox, args.four_k_resolution, webhook_url, latest_images, image_count, labels, pijuice, chargelevel_start, logger, pipeline, rec_id, rec_start, save_path)


if __name__ == "__main__":
    # Parse the command-line arguments
    args = parse_arguments()
    # Pass the parsed arguments to the capture function
    capture(args)
70 changes: 70 additions & 0 deletions refactored/data_log.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
import csv
import time
from datetime import datetime
import psutil
from pathlib import Path
from pijuice import PiJuice # If using PiJuice in this context
from gpiozero import CPUTemperature
import pandas as pd

def record_log(rec_id, rec_start, save_path, chargelevel_start, chargelevel, start_time):
    """Write information about each recording interval to .csv file."""
    # Number of unique insect track IDs seen this interval; an empty
    # metadata file (no detections at all) counts as zero.
    try:
        metadata = pd.read_csv(f"{save_path}/metadata_{rec_start}.csv", encoding="utf-8")
        num_ids = metadata["track_ID"].nunique()
    except pd.errors.EmptyDataError:
        num_ids = 0

    fieldnames = [
        "rec_ID", "record_start_date", "record_start_time", "record_end_time",
        "record_time_min", "num_crops", "num_IDs", "disk_free_gb",
        "chargelevel_start", "chargelevel_end",
    ]
    # Assemble the whole row up front, then append it in one go.
    row = {
        "rec_ID": rec_id,
        "record_start_date": rec_start[:8],
        "record_start_time": rec_start[9:],
        "record_end_time": datetime.now().strftime("%H-%M"),
        "record_time_min": round((time.monotonic() - start_time) / 60, 2),
        "num_crops": len(list(Path(f"{save_path}/cropped").glob("**/*.jpg"))),
        "num_IDs": num_ids,
        "disk_free_gb": round(psutil.disk_usage("/").free / 1073741824, 1),
        "chargelevel_start": chargelevel_start,
        "chargelevel_end": chargelevel,
    }

    with open(f"{save_path}/record_log.csv", "a", encoding="utf-8") as rec_file:
        writer = csv.DictWriter(rec_file, fieldnames=fieldnames)
        if rec_file.tell() == 0:
            # Fresh file: emit the header before the first row.
            writer.writeheader()
        writer.writerow(row)


def save_logs(rec_id, rec_start, chargelevel, pijuice, device):
    """Write recording ID, time, RPi CPU + OAK chip temperature, RPi available memory (MB) +
    CPU utilization (%) and PiJuice battery info + temp to .csv file.

    Args:
        rec_id: numeric ID of the current recording interval.
        rec_start: recording start timestamp string; its first 8 chars (the date)
            name the daily log file.
        chargelevel: current PiJuice battery charge level to record.
        pijuice: PiJuice instance queried for power/battery status.
        device: handle exposing getChipTemperature() — presumably a depthai
            Device; confirm against the pipeline setup.
    """
    # Appends one row per call; the header is written only when the file is empty.
    with open(f"insect-detect/data/{rec_start[:8]}/info_log_{rec_start[:8]}.csv", "a", encoding="utf-8") as log_info_file:
        log_info = csv.DictWriter(log_info_file, fieldnames=[
            "rec_ID", "timestamp", "temp_pi", "temp_oak", "pi_mem_available", "pi_cpu_used",
            "power_input", "charge_status", "charge_level", "temp_batt", "voltage_batt_mV"
        ])
        if log_info_file.tell() == 0:
            log_info.writeheader()
        try:
            temp_oak = round(device.getChipTemperature().average)  # This might need to be adjusted based on actual method to get temperature
        except RuntimeError:
            # e.g. OAK device no longer reachable — record "NA" instead of crashing
            temp_oak = "NA"
        try:
            logs_info = {
                "rec_ID": rec_id,
                "timestamp": datetime.now().strftime("%Y%m%d_%H-%M-%S"),
                "temp_pi": round(CPUTemperature().temperature),
                "temp_oak": temp_oak,
                "pi_mem_available": round(psutil.virtual_memory().available / 1048576),
                "pi_cpu_used": psutil.cpu_percent(interval=None),
                "power_input": pijuice.status.GetStatus().get("data", {}).get("powerInput", "NA"),
                "charge_status": pijuice.status.GetStatus().get("data", {}).get("battery", "NA"),
                "charge_level": chargelevel,
                "temp_batt": pijuice.status.GetBatteryTemperature().get("data", "NA"),
                "voltage_batt_mV": pijuice.status.GetBatteryVoltage().get("data", "NA")
            }
        except IndexError:
            # NOTE(review): IndexError looks unlikely from these .get() calls —
            # confirm the intended exception type; on it an empty row is written.
            logs_info = {}
        log_info.writerow(logs_info)
        log_info_file.flush()  # write immediately to reduce data loss on power cut
113 changes: 113 additions & 0 deletions refactored/data_management.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,113 @@
import csv
from datetime import datetime
import cv2
import requests
from image_processing import frame_norm, make_bbox_square

def store_data(frame, tracks, rec_id, rec_start, save_path, labels, save_raw_frames, save_overlay_frames, crop_bbox, four_k_resolution, webhook_url, latest_images, image_count):
    """Save cropped detections (+ full HQ frames) to .jpg and tracker output to metadata .csv.

    Args:
        frame: HQ image (as used by cv2) the detections refer to.
        tracks: tracker output objects exposing .status.name, .id and
            .srcImgDetection (xmin/ymin/xmax/ymax/label/confidence) —
            presumably DepthAI tracklets; confirm against the pipeline.
        rec_id: numeric ID of the current recording interval.
        rec_start: recording start timestamp string, used in file names.
        save_path: base output directory for this recording.
        labels: sequence mapping detection label indices to class names.
        save_raw_frames: if True, additionally save the raw HQ frame once per call.
        save_overlay_frames: if True, additionally save the HQ frame with overlays.
        crop_bbox: "square" for 1:1 crops, anything else for tight bbox crops.
        four_k_resolution: affects square-crop sizing and overlay text metrics.
        webhook_url: endpoint that receives the 3rd crop of each track via HTTP POST.
        latest_images: mutated in place: track.id -> newest crop path.
        image_count: mutated in place: track.id -> number of crops saved so far.
    """
    with open(f"{save_path}/metadata_{rec_start}.csv", "a", encoding="utf-8") as metadata_file:
        metadata = csv.DictWriter(metadata_file, fieldnames=
            ["rec_ID", "timestamp", "label", "confidence", "track_ID",
             "x_min", "y_min", "x_max", "y_max", "file_path"])
        if metadata_file.tell() == 0:
            metadata.writeheader()  # write header only once

        # Save full raw HQ frame (e.g. for training data collection)
        if save_raw_frames:
            for track in tracks:
                # only write the frame once, when the last track is reached
                if track == tracks[-1]:
                    timestamp = datetime.now().strftime("%Y%m%d_%H-%M-%S.%f")
                    raw_path = f"{save_path}/raw/{timestamp}_raw.jpg"
                    cv2.imwrite(raw_path, frame)
                    #cv2.imwrite(raw_path, frame, [cv2.IMWRITE_JPEG_QUALITY, 70])

        for track in tracks:
            # Don't save cropped detections if tracking status == "NEW" or "LOST" or "REMOVED"
            if track.status.name == "TRACKED":

                # Save detections cropped from HQ frame to .jpg
                bbox = frame_norm(frame, (track.srcImgDetection.xmin, track.srcImgDetection.ymin,
                                          track.srcImgDetection.xmax, track.srcImgDetection.ymax))
                if crop_bbox == "square":
                    det_crop = make_bbox_square(frame, bbox, four_k_resolution)
                else:
                    det_crop = frame[bbox[1]:bbox[3], bbox[0]:bbox[2]]
                label = labels[track.srcImgDetection.label]
                timestamp = datetime.now().strftime("%Y%m%d_%H-%M-%S.%f")
                crop_path = f"{save_path}/cropped/{label}/{timestamp}_{track.id}_crop.jpg"
                cv2.imwrite(crop_path, det_crop)

                # Update the latest image for this track.id
                latest_images[track.id] = crop_path

                # Update image count for this track.id
                image_count[track.id] = image_count.get(track.id, 0) + 1
                print(f"Image count for track.id {track.id}: {image_count[track.id]}")

                # Upload exactly the third crop of each track to the webhook
                # (== 3, not >= 3, so later crops are never re-sent)
                if image_count[track.id] == 3:
                    try:
                        with open(crop_path, 'rb') as f:
                            #Open metadata CSV
                            #with open(f"{save_path}/metadata_{rec_start}.csv", 'rb') as metadata_file:
                            # Prepare the files to be sent
                            files = {'file': f}
                            #'metadata': ('metadata.csv', metadata_file)

                            data = {
                                'accountID': 'Y7I3Jmp7dCXoank4WXKeTCSoPDp1'  # Replace with your actual account ID
                            }
                            response = requests.post(webhook_url, files=files, data=data)

                            if response.status_code == 200:
                                print(f"Successfully sent {crop_path} to webhook.")
                            else:
                                print(f"Failed to send image to webhook. Status code: {response.status_code}")
                    except Exception as e:
                        # best-effort upload: failures are printed but never abort saving
                        print(f"An error occurred: {e}")

                # Save corresponding metadata to .csv file for each cropped detection
                # (reuses the name `data`, previously the webhook payload above)
                data = {
                    "rec_ID": rec_id,
                    "timestamp": timestamp,
                    "label": label,
                    "confidence": round(track.srcImgDetection.confidence, 2),
                    "track_ID": track.id,
                    "x_min": round(track.srcImgDetection.xmin, 4),
                    "y_min": round(track.srcImgDetection.ymin, 4),
                    "x_max": round(track.srcImgDetection.xmax, 4),
                    "y_max": round(track.srcImgDetection.ymax, 4),
                    "file_path": crop_path

                }
                metadata.writerow(data)
                metadata_file.flush()  # write data immediately to .csv to avoid potential data loss

                # Save full HQ frame with overlay (bounding box, label, confidence, tracking ID) drawn on frame
                if save_overlay_frames:
                    # Text position, font size and thickness optimized for 1920x1080 px HQ frame size
                    if not four_k_resolution:
                        cv2.putText(frame, labels[track.srcImgDetection.label], (bbox[0], bbox[3] + 28),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 255, 255), 2)
                        cv2.putText(frame, f"{round(track.srcImgDetection.confidence, 2)}", (bbox[0], bbox[3] + 55),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)
                        cv2.putText(frame, f"ID:{track.id}", (bbox[0], bbox[3] + 92),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1.1, (255, 255, 255), 2)
                        cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 2)
                    # Text position, font size and thickness optimized for 3840x2160 px HQ frame size
                    else:
                        cv2.putText(frame, labels[track.srcImgDetection.label], (bbox[0], bbox[3] + 48),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1.7, (255, 255, 255), 3)
                        cv2.putText(frame, f"{round(track.srcImgDetection.confidence, 2)}", (bbox[0], bbox[3] + 98),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1.6, (255, 255, 255), 3)
                        cv2.putText(frame, f"ID:{track.id}", (bbox[0], bbox[3] + 164),
                                    cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 3)
                        cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 3)
                    # only write the overlay frame once, after the last track was drawn
                    if track == tracks[-1]:
                        timestamp = datetime.now().strftime("%Y%m%d_%H-%M-%S.%f")
                        overlay_path = f"{save_path}/overlay/{timestamp}_overlay.jpg"
                        cv2.imwrite(overlay_path, frame)
                        #cv2.imwrite(overlay_path, frame, [cv2.IMWRITE_JPEG_QUALITY, 70])
Loading