diff --git a/README.md b/README.md
index 6dfee6b..62cc944 100644
--- a/README.md
+++ b/README.md
@@ -148,6 +148,31 @@ source venv/bin/activate
 pip install -r requirements.txt
 ```
 
+**For Linux (Debian/Ubuntu based):**
+
+```bash
+# Install system dependencies (if needed)
+sudo apt-get update
+sudo apt-get install python3-venv python3-pip ffmpeg git
+
+# Create and activate virtual environment
+python3 -m venv venv
+source venv/bin/activate
+
+# Install Python dependencies
+# (Important: ensure you have CPU-only versions if not using a GPU)
+pip uninstall -y torch torchvision torchaudio onnxruntime onnxruntime-gpu
+pip install -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cpu
+
+# Install webcam utilities (optional but helpful for troubleshooting)
+sudo apt-get install v4l-utils
+
+# Ensure your user is in the 'video' group for webcam access
+# (you might need to log out and log back in after adding)
+sudo adduser $USER video
+groups
+```
+
 **For macOS:**
 
 Apple Silicon (M1/M2/M3) requires specific setup:
@@ -181,7 +206,7 @@ source venv/bin/activate
 pip install -r requirements.txt
 ```
 
-**Run:** If you don't have a GPU, you can run Deep-Live-Cam using `python run.py`. Note that initial execution will download models (~300MB).
+**Run:** If you don't have a GPU, you can run Deep-Live-Cam using `python run.py` or `python run.py --execution-provider cpu`. Note that initial execution will download models (~300MB). Performance will be very low (potentially < 1 FPS) without a compatible GPU.
 
 ### GPU Acceleration
 
diff --git a/modules/core.py b/modules/core.py
index b6ef9b8..ab2d6bd 100644
--- a/modules/core.py
+++ b/modules/core.py
@@ -4,7 +4,11 @@ import sys
 if any(arg.startswith('--execution-provider') for arg in sys.argv):
     os.environ['OMP_NUM_THREADS'] = '1'
 # reduce tensorflow log level
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+# disable GPU for tensorflow when using the CPU provider
+if '--execution-provider' in sys.argv and 'cpu' in sys.argv[sys.argv.index('--execution-provider') + 1]:
+    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
+
 import warnings
 from typing import List
 import platform
@@ -81,6 +85,13 @@ def parse_args() -> None:
     modules.globals.execution_threads = args.execution_threads
     modules.globals.lang = args.lang
 
+    # If using the CPU provider, ensure we're not using any GPU features
+    if 'cpu' in args.execution_provider:
+        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
+        # Release any GPU memory torch may already have cached
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
+
     #for ENHANCER tumbler:
     if 'face_enhancer' in args.frame_processor:
         modules.globals.fp_ui['face_enhancer'] = True
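As an aside for anyone applying this patch (not part of the change itself): after the CPU-only reinstall described in the README section above, it is worth confirming that neither onnxruntime nor torch still sees a CUDA device. A minimal check, assuming the project's usual `onnxruntime` and `torch` dependencies are installed:

```python
# Illustrative sanity check, not part of this patch: confirm the environment is CPU-only.
import onnxruntime as ort
import torch

# Expect something like ['CPUExecutionProvider'] with no CUDA/TensorRT entries.
print(ort.get_available_providers())
# Expect False with the CPU-only torch wheels (or with CUDA_VISIBLE_DEVICES=-1 set).
print(torch.cuda.is_available())
```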
diff --git a/modules/video_capture.py b/modules/video_capture.py
index cab223d..962e849 100644
--- a/modules/video_capture.py
+++ b/modules/video_capture.py
@@ -42,15 +42,31 @@ class VideoCapturer:
 
                 for dev_id, backend in capture_methods:
                     try:
+                        print(f"Trying device {dev_id} with backend {backend}")
                         self.cap = cv2.VideoCapture(dev_id, backend)
                         if self.cap.isOpened():
+                            print(f"Successfully opened device {dev_id} with backend {backend}")
                             break
                         self.cap.release()
-                    except Exception:
+                    except Exception as e:
+                        print(f"Failed to open device {dev_id} with backend {backend}: {str(e)}")
                         continue
             else:
                 # Unix-like systems (Linux/Mac) capture method
-                self.cap = cv2.VideoCapture(self.device_index)
+                # Try device 0 first, then the specified device index if different
+                capture_methods = [(0, cv2.CAP_V4L2), (self.device_index, cv2.CAP_V4L2)] if self.device_index != 0 else [(0, cv2.CAP_V4L2)]
+
+                for dev_id, backend in capture_methods:
+                    try:
+                        print(f"Trying device {dev_id} with backend {backend}")
+                        self.cap = cv2.VideoCapture(dev_id, backend)
+                        if self.cap.isOpened():
+                            print(f"Successfully opened device {dev_id} with backend {backend}")
+                            break
+                        self.cap.release()
+                    except Exception as e:
+                        print(f"Failed to open device {dev_id} with backend {backend}: {str(e)}")
+                        continue
 
             if not self.cap or not self.cap.isOpened():
                 raise RuntimeError("Failed to open camera")
@@ -60,6 +76,12 @@ class VideoCapturer:
         self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
         self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
         self.cap.set(cv2.CAP_PROP_FPS, fps)
 
+        # Print actual camera settings
+        actual_width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
+        actual_height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
+        actual_fps = self.cap.get(cv2.CAP_PROP_FPS)
+        print(f"Camera initialized with: {actual_width}x{actual_height} @ {actual_fps}fps")
+
         self.is_running = True
         return True
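For Linux troubleshooting, a standalone probe of the webcam (illustrative only, mirroring the V4L2-first fallback added to `VideoCapturer` above) can confirm which device index actually delivers frames before launching the app; the indices 0 and 1 below are just examples, so compare them against `v4l2-ctl --list-devices`:

```python
# Minimal webcam probe, not part of this patch: check which V4L2 device
# indices can be opened and actually return a frame.
import cv2

for dev_id in (0, 1):  # example indices only; adjust for your machine
    cap = cv2.VideoCapture(dev_id, cv2.CAP_V4L2)
    opened = cap.isOpened()
    got_frame = cap.read()[0] if opened else False
    print(f"/dev/video{dev_id}: opened={opened}, frame_read={got_frame}")
    cap.release()
```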