diff --git a/README.md b/README.md
index b9a61ac..af73c7c 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,13 @@
+
Deep Live Cam
-
+
+ Real-time face swap and video deepfake with a single click and only a single image.
+
-## Deep Live Cam
-
-Real-time face swap and video deepfake with a single click and only a single image.
+
+
+
+
## Disclaimer
@@ -14,40 +18,18 @@ We are aware of the potential for unethical applications and are committed to pr
Users are expected to use this software responsibly and legally. If using a real person's face, obtain their consent and clearly label any output as a deepfake when sharing online. We are not responsible for end-user actions.
-## Features
-
-### Resizable Preview Window
-
-Dynamically improve performance using the `--live-resizable` parameter.
-
-
-
-### Face Mapping
-
-Track and change faces on the fly.
-
-
-
-**Source Video:**
-
-
-
-**Enable Face Mapping:**
-
-
-
-**Map the Faces:**
-
-
-
-**See the Magic!**
-
## Quick Start (Windows / Nvidia)
-[Download pre-built version with CUDA support](https://hacksider.gumroad.com/l/vccdmm)
+
+
+
+
+
+[Download the latest pre-built version with CUDA support](https://hacksider.gumroad.com/l/vccdmm) - no manual installation or model downloads required.
## Installation (Manual)
-
+**Please be aware that the installation requires technical skills and is NOT for beginners; consider downloading the prebuilt version instead. Please do NOT open platform- or installation-related issues on GitHub before discussing them on the Discord server.**
### Basic Installation (CPU)
This is more likely to work on your computer but will be slower as it utilizes the CPU.
@@ -69,7 +51,7 @@ https://github.com/hacksider/Deep-Live-Cam.git
**3. Download Models**
1. [GFPGANv1.4](https://huggingface.co/hacksider/deep-live-cam/resolve/main/GFPGANv1.4.pth)
-2. [inswapper_128_fp16.onnx](https://huggingface.co/hacksider/deep-live-cam/resolve/main/inswapper_128_fp16.onnx) (Note: Use this [replacement version](https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128_fp16.onnx) if you encounter issues)
+2. [inswapper_128.onnx](https://huggingface.co/hacksider/deep-live-cam/resolve/main/inswapper_128.onnx) (Note: Use this [replacement version](https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128.onnx) if you encounter issues)
Place these files in the "**models**" folder.
@@ -179,6 +161,35 @@ python run.py --execution-provider openvino

+## Features
+
+### Resizable Preview Window
+
+Dynamically improve performance by resizing the live preview window, enabled with the `--live-resizable` parameter.
+
+
+
+### Face Mapping
+
+Track and change faces on the fly.
+
+
+
+**Source Video:**
+
+
+
+**Enable Face Mapping:**
+
+
+
+**Map the Faces:**
+
+
+
+**See the Magic!**
+
+
## Command Line Arguments
```
@@ -385,3 +396,5 @@ This is an open-source project developed in our free time. Updates may be delaye
+
+
diff --git a/docs/demo.gif b/docs/demo.gif
deleted file mode 100644
index def2909..0000000
Binary files a/docs/demo.gif and /dev/null differ
diff --git a/docs/gui-demo.jpg b/docs/gui-demo.jpg
deleted file mode 100644
index 660b10f..0000000
Binary files a/docs/gui-demo.jpg and /dev/null differ
diff --git a/modules/metadata.py b/modules/metadata.py
index cf395aa..d823f6a 100644
--- a/modules/metadata.py
+++ b/modules/metadata.py
@@ -1,3 +1,3 @@
name = 'Deep Live Cam'
-version = '1.4.0'
+version = '1.5.0'
edition = 'Portable'
diff --git a/modules/processors/frame/face_swapper.py b/modules/processors/frame/face_swapper.py
index 6fd0760..35a4090 100644
--- a/modules/processors/frame/face_swapper.py
+++ b/modules/processors/frame/face_swapper.py
@@ -18,7 +18,7 @@ NAME = 'DLC.FACE-SWAPPER'
def pre_check() -> bool:
download_directory_path = resolve_relative_path('../models')
- conditional_download(download_directory_path, ['https://huggingface.co/hacksider/deep-live-cam/blob/main/inswapper_128_fp16.onnx'])
+    conditional_download(download_directory_path, ['https://huggingface.co/hacksider/deep-live-cam/resolve/main/inswapper_128.onnx'])
return True
@@ -40,7 +40,7 @@ def get_face_swapper() -> Any:
with THREAD_LOCK:
if FACE_SWAPPER is None:
- model_path = resolve_relative_path('../models/inswapper_128_fp16.onnx')
+ model_path = resolve_relative_path('../models/inswapper_128.onnx')
FACE_SWAPPER = insightface.model_zoo.get_model(model_path, providers=modules.globals.execution_providers)
return FACE_SWAPPER
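`pre_check` above and the README's manual model step both assume `inswapper_128.onnx` ends up in the `models` folder. A minimal download-if-missing sketch of that behaviour (an illustrative stand-in for the project's `conditional_download`, using the resolve URL from the README):

```python
import os
import urllib.request

MODEL_URL = "https://huggingface.co/hacksider/deep-live-cam/resolve/main/inswapper_128.onnx"

def ensure_model(models_dir: str = "models") -> str:
    # Fetch the swapper model only if it is not already on disk.
    os.makedirs(models_dir, exist_ok=True)
    model_path = os.path.join(models_dir, "inswapper_128.onnx")
    if not os.path.exists(model_path):
        urllib.request.urlretrieve(MODEL_URL, model_path)
    return model_path
```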
diff --git a/modules/ui.json b/modules/ui.json
new file mode 100644
index 0000000..0954578
--- /dev/null
+++ b/modules/ui.json
@@ -0,0 +1,158 @@
+{
+ "CTk": {
+ "fg_color": ["gray95", "gray10"]
+ },
+ "CTkToplevel": {
+ "fg_color": ["gray95", "gray10"]
+ },
+ "CTkFrame": {
+ "corner_radius": 0,
+ "border_width": 0,
+ "fg_color": ["gray90", "gray13"],
+ "top_fg_color": ["gray85", "gray16"],
+ "border_color": ["gray65", "gray28"]
+ },
+ "CTkButton": {
+ "corner_radius": 0,
+ "border_width": 0,
+ "fg_color": ["#2aa666", "#1f538d"],
+ "hover_color": ["#3cb666", "#14375e"],
+ "border_color": ["#3e4a40", "#949A9F"],
+ "text_color": ["#f3faf6", "#f3faf6"],
+ "text_color_disabled": ["gray74", "gray60"]
+ },
+ "CTkLabel": {
+ "corner_radius": 0,
+ "fg_color": "transparent",
+ "text_color": ["gray14", "gray84"]
+ },
+ "CTkEntry": {
+ "corner_radius": 0,
+ "border_width": 2,
+ "fg_color": ["#F9F9FA", "#343638"],
+ "border_color": ["#979DA2", "#565B5E"],
+ "text_color": ["gray14", "gray84"],
+ "placeholder_text_color": ["gray52", "gray62"]
+ },
+ "CTkCheckbox": {
+ "corner_radius": 0,
+ "border_width": 3,
+ "fg_color": ["#2aa666", "#1f538d"],
+ "border_color": ["#3e4a40", "#949A9F"],
+ "hover_color": ["#3cb666", "#14375e"],
+ "checkmark_color": ["#f3faf6", "gray90"],
+ "text_color": ["gray14", "gray84"],
+ "text_color_disabled": ["gray60", "gray45"]
+ },
+ "CTkSwitch": {
+ "corner_radius": 1000,
+ "border_width": 3,
+ "button_length": 0,
+ "fg_color": ["#939BA2", "#4A4D50"],
+ "progress_color": ["#2aa666", "#1f538d"],
+ "button_color": ["gray36", "#D5D9DE"],
+ "button_hover_color": ["gray20", "gray100"],
+ "text_color": ["gray14", "gray84"],
+ "text_color_disabled": ["gray60", "gray45"]
+ },
+ "CTkRadiobutton": {
+ "corner_radius": 1000,
+ "border_width_checked": 6,
+ "border_width_unchecked": 3,
+ "fg_color": ["#2aa666", "#1f538d"],
+ "border_color": ["#3e4a40", "#949A9F"],
+ "hover_color": ["#3cb666", "#14375e"],
+ "text_color": ["gray14", "gray84"],
+ "text_color_disabled": ["gray60", "gray45"]
+ },
+ "CTkProgressBar": {
+ "corner_radius": 1000,
+ "border_width": 0,
+ "fg_color": ["#939BA2", "#4A4D50"],
+ "progress_color": ["#2aa666", "#1f538d"],
+ "border_color": ["gray", "gray"]
+ },
+ "CTkSlider": {
+ "corner_radius": 1000,
+ "button_corner_radius": 1000,
+ "border_width": 6,
+ "button_length": 0,
+ "fg_color": ["#939BA2", "#4A4D50"],
+ "progress_color": ["gray40", "#AAB0B5"],
+ "button_color": ["#2aa666", "#1f538d"],
+ "button_hover_color": ["#3cb666", "#14375e"]
+ },
+ "CTkOptionMenu": {
+ "corner_radius": 0,
+ "fg_color": ["#2aa666", "#1f538d"],
+ "button_color": ["#3cb666", "#14375e"],
+ "button_hover_color": ["#234567", "#1e2c40"],
+ "text_color": ["#f3faf6", "#f3faf6"],
+ "text_color_disabled": ["gray74", "gray60"]
+ },
+ "CTkComboBox": {
+ "corner_radius": 0,
+ "border_width": 2,
+ "fg_color": ["#F9F9FA", "#343638"],
+ "border_color": ["#979DA2", "#565B5E"],
+ "button_color": ["#979DA2", "#565B5E"],
+ "button_hover_color": ["#6E7174", "#7A848D"],
+ "text_color": ["gray14", "gray84"],
+ "text_color_disabled": ["gray50", "gray45"]
+ },
+ "CTkScrollbar": {
+ "corner_radius": 1000,
+ "border_spacing": 4,
+ "fg_color": "transparent",
+ "button_color": ["gray55", "gray41"],
+ "button_hover_color": ["gray40", "gray53"]
+ },
+ "CTkSegmentedButton": {
+ "corner_radius": 0,
+ "border_width": 2,
+ "fg_color": ["#979DA2", "gray29"],
+ "selected_color": ["#2aa666", "#1f538d"],
+ "selected_hover_color": ["#3cb666", "#14375e"],
+ "unselected_color": ["#979DA2", "gray29"],
+ "unselected_hover_color": ["gray70", "gray41"],
+ "text_color": ["#f3faf6", "#f3faf6"],
+ "text_color_disabled": ["gray74", "gray60"]
+ },
+ "CTkTextbox": {
+ "corner_radius": 0,
+ "border_width": 0,
+ "fg_color": ["gray100", "gray20"],
+ "border_color": ["#979DA2", "#565B5E"],
+ "text_color": ["gray14", "gray84"],
+ "scrollbar_button_color": ["gray55", "gray41"],
+ "scrollbar_button_hover_color": ["gray40", "gray53"]
+ },
+ "CTkScrollableFrame": {
+ "label_fg_color": ["gray80", "gray21"]
+ },
+ "DropdownMenu": {
+ "fg_color": ["gray90", "gray20"],
+ "hover_color": ["gray75", "gray28"],
+ "text_color": ["gray14", "gray84"]
+ },
+ "CTkFont": {
+ "macOS": {
+ "family": "Avenir",
+ "size": 18,
+ "weight": "normal"
+ },
+ "Windows": {
+ "family": "Corbel",
+ "size": 18,
+ "weight": "normal"
+ },
+ "Linux": {
+ "family": "Montserrat",
+ "size": 18,
+ "weight": "normal"
+ }
+ },
+ "URL": {
+ "text_color": ["gray74", "gray60"]
+ }
+}
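A theme file in this format is normally applied by pointing customtkinter at the JSON before any widgets are created; each color entry is a `[light-mode, dark-mode]` pair. How `ui.py` actually consumes `ui.json` is not shown in this diff, so the wiring below is a minimal illustrative sketch:

```python
import customtkinter as ctk

# Load the bundled theme; colors in modules/ui.json are [light, dark] pairs.
ctk.set_appearance_mode("dark")
ctk.set_default_color_theme("modules/ui.json")

root = ctk.CTk()
ctk.CTkButton(root, text="Themed button").pack(padx=20, pady=20)
root.mainloop()
```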
diff --git a/modules/ui.py b/modules/ui.py
index dc497df..ec2210a 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -5,7 +5,7 @@ from typing import Callable, Tuple
import cv2
from PIL import Image, ImageOps
import tkinterdnd2 as tkdnd
-
+import time
import modules.globals
import modules.metadata
from modules.face_analyser import (
@@ -25,6 +25,10 @@ from modules.utilities import (
has_image_extension,
)
+os.environ["QT_AUTO_SCREEN_SCALE_FACTOR"] = "1"
+os.environ["QT_SCREEN_SCALE_FACTORS"] = "1"
+os.environ["QT_SCALE_FACTOR"] = "1"
+
ROOT = None
POPUP = None
POPUP_LIVE = None
@@ -219,6 +223,7 @@ def create_root(
root.configure(bg="#1a1a1a")
root.protocol("WM_DELETE_WINDOW", lambda: destroy())
root.resizable(True, True)
+ root.attributes("-alpha", 1.0) # Set window opacity to fully opaque
main_frame = ctk.CTkFrame(root, fg_color="#1a1a1a")
main_frame.pack(fill="both", expand=True, padx=20, pady=20)
@@ -677,6 +682,29 @@ def create_preview(parent: ctk.CTkToplevel) -> ctk.CTkToplevel:
)
preview_slider.pack(fill="x", padx=20, pady=10)
+ last_update_time = 0
+ debounce_delay = 0.1 # Adjust this delay as needed (in seconds)
+
+ def on_key_press(event):
+ nonlocal last_update_time
+
+ current_time = time.time()
+ if current_time - last_update_time > debounce_delay:
+ current_frame = int(preview_slider.get())
+ if event.keysym == "Left":
+ new_frame = max(0, current_frame - 1)
+ elif event.keysym == "Right":
+ new_frame = min(int(preview_slider.cget("to")), current_frame + 1)
+ else:
+ return # Ignore other key presses
+
+ preview_slider.set(new_frame)
+ update_preview(new_frame)
+ last_update_time = current_time
+
+    preview.bind("<Left>", on_key_press)
+    preview.bind("<Right>", on_key_press)
+
return preview
@@ -874,20 +902,53 @@ def init_preview() -> None:
preview_slider.configure(to=video_frame_total)
preview_slider.pack(fill="x")
preview_slider.set(0)
+ # Disable slider if it's an image
+ if is_image(modules.globals.target_path):
+ preview_slider.configure(state="disabled")
+ else:
+ preview_slider.configure(state="normal")
def update_preview(frame_number: int = 0) -> None:
if modules.globals.source_path and modules.globals.target_path:
update_status("Processing...")
- temp_frame = get_video_frame(modules.globals.target_path, frame_number)
+
+ # Debug: Print the target path and frame number
+ print(
+ f"Target path: {modules.globals.target_path}, Frame number: {frame_number}"
+ )
+
+ temp_frame = None
+ if is_video(modules.globals.target_path):
+ temp_frame = get_video_frame(modules.globals.target_path, frame_number)
+ elif is_image(modules.globals.target_path):
+ temp_frame = cv2.imread(modules.globals.target_path)
+
+ # Debug: Check if temp_frame is None
+ if temp_frame is None:
+ print("Error: temp_frame is None")
+ update_status("Error: Could not read frame from video or image.")
+ return
+
if modules.globals.nsfw_filter and check_and_ignore_nsfw(temp_frame):
return
+
for frame_processor in get_frame_processors_modules(
modules.globals.frame_processors
):
+ # Debug: Print the type of frame_processor
+ print(f"Processing frame with: {type(frame_processor).__name__}")
+
temp_frame = frame_processor.process_frame(
get_one_face(cv2.imread(modules.globals.source_path)), temp_frame
)
+
+ # Debug: Check if temp_frame is None after processing
+ if temp_frame is None:
+ print("Error: temp_frame is None after processing")
+ update_status("Error: Frame processing failed.")
+ return
+
image = Image.fromarray(cv2.cvtColor(temp_frame, cv2.COLOR_BGR2RGB))
image = ImageOps.contain(
image, (PREVIEW_MAX_WIDTH, PREVIEW_MAX_HEIGHT), Image.LANCZOS
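The preview key handling added above binds `<Left>` and `<Right>` and throttles repeats so holding an arrow key does not flood `update_preview`. A minimal standalone sketch of the same debounce pattern in plain tkinter (illustrative only, not the project's code):

```python
import time
import tkinter as tk

root = tk.Tk()
last_update_time = 0.0
DEBOUNCE_DELAY = 0.1  # seconds; matches the delay used in the preview window

def on_key_press(event: tk.Event) -> None:
    # Ignore key repeats that arrive faster than the debounce delay.
    global last_update_time
    now = time.time()
    if now - last_update_time <= DEBOUNCE_DELAY:
        return
    step = -1 if event.keysym == "Left" else 1
    print(f"step frame by {step}")
    last_update_time = now

root.bind("<Left>", on_key_press)
root.bind("<Right>", on_key_press)
root.mainloop()
```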
diff --git a/requirements.txt b/requirements.txt
index f747432..34731f8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -12,10 +12,8 @@ torch==2.0.1+cu118; sys_platform != 'darwin'
torch==2.0.1; sys_platform == 'darwin'
torchvision==0.15.2+cu118; sys_platform != 'darwin'
torchvision==0.15.2; sys_platform == 'darwin'
-onnxruntime==1.18.0; sys_platform == 'darwin' and platform_machine != 'arm64'
onnxruntime-silicon==1.16.3; sys_platform == 'darwin' and platform_machine == 'arm64'
-onnxruntime-gpu==1.18.0; sys_platform != 'darwin'
-tensorflow==2.13.0rc1; sys_platform == 'darwin'
+onnxruntime-gpu==1.16.3; sys_platform != 'darwin'
tensorflow==2.12.1; sys_platform != 'darwin'
opennsfw2==0.10.2
protobuf==4.23.2