diff --git a/README.md b/README.md
index d5ffecb..6dfee6b 100644
--- a/README.md
+++ b/README.md
@@ -30,11 +30,11 @@ By using this software, you agree to these terms and commit to using it in a man
Users are expected to use this software responsibly and legally. If using a real person's face, obtain their consent and clearly label any output as a deepfake when sharing online. We are not responsible for end-user actions.
-## Exclusive v2.0 Quick Start - Pre-built (Windows / Nvidia)
+## Exclusive v2.0 Quick Start - Pre-built (Windows)
-
+
-##### This is the fastest build you can get if you have a discrete NVIDIA GPU.
+##### This is the fastest build you can get if you have a discrete NVIDIA or AMD GPU.
###### These pre-builts are perfect for non-technical users, or for anyone who doesn't have time to, or can't, install all the requirements manually. Just a heads-up: this is an open-source project, so you can also install it manually. The pre-built version will be 60 days ahead of the open-source version.
@@ -133,12 +133,20 @@ Place these files in the "**models**" folder.
We highly recommend using a `venv` to avoid issues.
+
For Windows:
```bash
python -m venv venv
venv\Scripts\activate
pip install -r requirements.txt
```
+For Linux:
+```bash
+# Ensure you use the installed Python 3.10
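+# If python3 does not point to 3.10 on your system, call it explicitly, e.g. "python3.10 -m venv venv"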
+python3 -m venv venv
+source venv/bin/activate
+pip install -r requirements.txt
+```
**For macOS:**
diff --git a/media/Download.png b/media/Download.png
new file mode 100644
index 0000000..c79397d
Binary files /dev/null and b/media/Download.png differ
diff --git a/media/download.png b/media/download.png
deleted file mode 100644
index 917d7d8..0000000
Binary files a/media/download.png and /dev/null differ
diff --git a/modules/processors/frame/core.py b/modules/processors/frame/core.py
index 7d76704..6d99fd1 100644
--- a/modules/processors/frame/core.py
+++ b/modules/processors/frame/core.py
@@ -42,18 +42,29 @@ def get_frame_processors_modules(frame_processors: List[str]) -> List[ModuleType
def set_frame_processors_modules_from_ui(frame_processors: List[str]) -> None:
global FRAME_PROCESSORS_MODULES
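+    # Short names (e.g. 'face_enhancer') of the frame processor modules that are already loaded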
+ current_processor_names = [proc.__name__.split('.')[-1] for proc in FRAME_PROCESSORS_MODULES]
+
for frame_processor, state in modules.globals.fp_ui.items():
- if state == True and frame_processor not in frame_processors:
- frame_processor_module = load_frame_processor_module(frame_processor)
- FRAME_PROCESSORS_MODULES.append(frame_processor_module)
- modules.globals.frame_processors.append(frame_processor)
- if state == False:
+        if state and frame_processor not in current_processor_names:
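+            # The UI enabled this processor but it is not loaded yet: load and register it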
try:
frame_processor_module = load_frame_processor_module(frame_processor)
- FRAME_PROCESSORS_MODULES.remove(frame_processor_module)
- modules.globals.frame_processors.remove(frame_processor)
- except:
- pass
+ FRAME_PROCESSORS_MODULES.append(frame_processor_module)
+ if frame_processor not in modules.globals.frame_processors:
+ modules.globals.frame_processors.append(frame_processor)
+ except SystemExit:
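+                # load_frame_processor_module may call sys.exit() if the module cannot be imported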
+ print(f"Warning: Failed to load frame processor {frame_processor} requested by UI state.")
+ except Exception as e:
+ print(f"Warning: Error loading frame processor {frame_processor} requested by UI state: {e}")
+
+        elif not state and frame_processor in current_processor_names:
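+            # The UI disabled this processor while it is loaded: unload it and deregister it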
+ try:
+ module_to_remove = next((mod for mod in FRAME_PROCESSORS_MODULES if mod.__name__.endswith(f'.{frame_processor}')), None)
+ if module_to_remove:
+ FRAME_PROCESSORS_MODULES.remove(module_to_remove)
+ if frame_processor in modules.globals.frame_processors:
+ modules.globals.frame_processors.remove(frame_processor)
+ except Exception as e:
+ print(f"Warning: Error removing frame processor {frame_processor}: {e}")
def multi_process_frame(source_path: str, temp_frame_paths: List[str], process_frames: Callable[[str, List[str], Any], None], progress: Any = None) -> None:
with ThreadPoolExecutor(max_workers=modules.globals.execution_threads) as executor:
diff --git a/modules/processors/frame/face_enhancer.py b/modules/processors/frame/face_enhancer.py
index 4e1fdff..de192e6 100644
--- a/modules/processors/frame/face_enhancer.py
+++ b/modules/processors/frame/face_enhancer.py
@@ -48,6 +48,17 @@ def pre_start() -> bool:
return True
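+# Optional: check whether torch_tensorrt is importable; the flag is only consulted when picking the enhancer device below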
+TENSORRT_AVAILABLE = False
+try:
+ import torch_tensorrt
+ TENSORRT_AVAILABLE = True
+except ImportError as im:
+ print(f"TensorRT is not available: {im}")
+except Exception as e:
+ print(f"TensorRT is not available: {e}")
+
def get_face_enhancer() -> Any:
global FACE_ENHANCER
@@ -55,16 +66,26 @@ def get_face_enhancer() -> Any:
if FACE_ENHANCER is None:
model_path = os.path.join(models_dir, "GFPGANv1.4.pth")
- match platform.system():
- case "Darwin": # Mac OS
- if torch.backends.mps.is_available():
- mps_device = torch.device("mps")
- FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1, device=mps_device) # type: ignore[attr-defined]
- else:
- FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1) # type: ignore[attr-defined]
- case _: # Other OS
- FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1) # type: ignore[attr-defined]
+ selected_device = None
+ device_priority = []
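+        # Device preference: TensorRT-enabled CUDA, then plain CUDA, then Apple MPS on macOS, then CPU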
+ if TENSORRT_AVAILABLE and torch.cuda.is_available():
+ selected_device = torch.device("cuda")
+ device_priority.append("TensorRT+CUDA")
+ elif torch.cuda.is_available():
+ selected_device = torch.device("cuda")
+ device_priority.append("CUDA")
+ elif torch.backends.mps.is_available() and platform.system() == "Darwin":
+ selected_device = torch.device("mps")
+ device_priority.append("MPS")
+        else:
+ selected_device = torch.device("cpu")
+ device_priority.append("CPU")
+
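+        # GFPGANer loads the GFPGANv1.4 weights onto the selected torch.device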
+        FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1, device=selected_device)  # type: ignore[attr-defined]
+
+        # Debug: report which device the face enhancer will run on
+        print(f"Selected face enhancer device: {selected_device} (chosen via: {device_priority})")
return FACE_ENHANCER