v0.1614.3 - added preferred CUDA selection to config.ini
FlyingFathead committed Dec 24, 2024
1 parent 0fe2e55 commit 79afd52
Showing 4 changed files with 52 additions and 9 deletions.
README.md (2 additions, 0 deletions)
@@ -269,6 +269,8 @@ Use `utils/batch_humdet_yolo8_opencv2.py` to run YOLOv8 batch detection on directories
- Add hooks for sending detections to web servers or APIs

## Changelog
- **v0.1614.3**
- Preferred CUDA device can now be selected under `[hardware]` in `config.ini`
- **v0.1614**
- More reactive detection aggregation updates to webui
- **v0.1613**
config.ini (8 additions, 0 deletions)
@@ -8,6 +8,14 @@ default_conf_threshold = 0.7
# Default model variant to use
default_model_variant = yolov8m

[hardware]
# Preferred CUDA device ID
# e.g., 0 means "cuda:0", 1 means "cuda:1"
cuda_device_id = 1
# If the chosen device isn't available, automatically fall back to the first available device (cuda:0)?
# true/false
cuda_fallback_if_unavailable = true

[input]
use_webcam = false
webcam_index = 0
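For reference, a quick way to see which device IDs are valid on a given machine, i.e. the values you can put in `cuda_device_id` (a minimal standalone PyTorch sketch; not part of this commit):

```python
import torch

# List the CUDA devices PyTorch can see; the indices printed here
# are the valid values for cuda_device_id under [hardware].
if torch.cuda.is_available():
    for i in range(torch.cuda.device_count()):
        print(f"cuda:{i} -> {torch.cuda.get_device_name(i)}")
else:
    print("No CUDA devices found; inference will run on CPU.")
```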
version.py (1 addition, 1 deletion)
@@ -1,3 +1,3 @@
# version.py

version_number = "0.1614.2 (Dec 3 2024)"
version_number = "0.1614.3 (Dec 24 2024)"
yolov8_live_rtmp_stream_detection.py (41 additions, 8 deletions)
@@ -263,15 +263,47 @@ def setup_logging():

# Load the YOLOv8 model
def load_model(model_variant=DEFAULT_MODEL_VARIANT):
    """Loads the YOLO model and puts it onto the configured CUDA device (or CPU)."""
    try:
        model = YOLO(model_variant)

        # 1) Read from config
        cuda_device_id = config.getint('hardware', 'cuda_device_id', fallback=0)
        cuda_fallback_if_unavailable = config.getboolean('hardware', 'cuda_fallback_if_unavailable', fallback=True)

        if torch.cuda.is_available():
            # 2) Check how many devices we have
            num_gpus = torch.cuda.device_count()

            if cuda_device_id < num_gpus:
                try:
                    # Attempt to put the model on the user-specified device
                    device_str = f"cuda:{cuda_device_id}"
                    model.to(device_str)
                    main_logger.info(f"Using CUDA device {cuda_device_id} for model inference.")
                except Exception as e:
                    main_logger.error(f"Failed to load model on cuda:{cuda_device_id}: {e}")
                    if cuda_fallback_if_unavailable and num_gpus > 0:
                        # Attempt fallback on the first available GPU (usually cuda:0)
                        main_logger.warning("Falling back to cuda:0")
                        model.to("cuda:0")
                    else:
                        main_logger.warning("No fallback GPU or fallback disabled. Using CPU instead.")
                        model.to('cpu')
            else:
                # The user-specified device doesn't exist
                main_logger.warning(f"Requested cuda:{cuda_device_id}, but only {num_gpus} GPU(s) found.")
                if cuda_fallback_if_unavailable and num_gpus > 0:
                    main_logger.warning("Falling back to cuda:0")
                    model.to("cuda:0")
                else:
                    main_logger.warning("No fallback GPU or fallback disabled. Using CPU instead.")
                    model.to('cpu')
        else:
            # No GPU present at all
            main_logger.warning("CUDA not available, using CPU for model inference.")
            model.to('cpu')

        return model
    except Exception as e:
        logging.error(f"Error loading model {model_variant}: {e}")
@@ -297,10 +329,11 @@ def log_cuda_info():
logging.info(f" Compute Capability: {gpu_capability[0]}.{gpu_capability[1]}")
logging.info(f" Total Memory: {gpu_memory:.2f} GB")

    # // not in use after 0.1614.3, preferred CUDA device is now set in `config.ini`
    # Log the current device being used
    # current_device = torch.cuda.current_device()
    # current_gpu_name = torch.cuda.get_device_name(current_device)
    # logging.info(f"Using CUDA Device {current_device}: {current_gpu_name}")

# Function to get the base save directory (without date subdirs)
def get_base_save_dir():
