Compare commits

...

11 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Uma Maheshwar Reddy Thipparthi | 8334a66e19 | Merge 0cc4a2216f into 9086072b8e | 2025-06-25 00:27:15 +01:00 |
| Kenneth Estanislao | 9086072b8e | Update README.md | 2025-06-23 17:06:34 +08:00 |
| KRSHH | 12fda0a3ed | fix formatting | 2025-06-17 18:42:36 +05:30 |
| KRSHH | d963430854 | Add techlinked link | 2025-06-17 18:42:10 +05:30 |
| KRSHH | 5855d15c09 | Removed outdated links | 2025-06-17 18:35:24 +05:30 |
| KRSHH | fcc73d0add | Update Download Button | 2025-06-16 14:37:41 +05:30 |
| KRSHH | 8d4a386a27 | Upgrade prebuilt to 2.1 | 2025-06-15 22:19:49 +05:30 |
| Chittimalla Krish | b98c5234d8 | Revert 8bdc14a | 2025-06-15 20:08:43 +05:30 |
| Chittimalla Krish | 8bdc14a789 | Update prebuilt version | 2025-06-15 17:50:38 +05:30 |
| Kenneth Estanislao | f121083bc8 | Update README.md (RTX 50xx support) | 2025-06-15 02:22:00 +08:00 |
| maheshreddyy345 | 0cc4a2216f | Fix CUDA configuration issue #1140 - add better error handling and fallback | 2025-04-30 06:15:13 -07:00 |
5 changed files with 157 additions and 51 deletions

View File

@@ -30,13 +30,13 @@ By using this software, you agree to these terms and commit to using it in a man
Users are expected to use this software responsibly and legally. If using a real person's face, obtain their consent and clearly label any output as a deepfake when sharing online. We are not responsible for end-user actions.
## Exclusive v2.0 Quick Start - Pre-built (Windows)
## Exclusive v2.1 Quick Start - Pre-built (Windows/Mac Silicon)
<a href="https://deeplivecam.net/index.php/quickstart"> <img src="media/Download.png" width="285" height="77" />
##### This is the fastest build you can get if you have a discrete NVIDIA or AMD GPU.
##### This is the fastest build you can get if you have a discrete NVIDIA or AMD GPU or Mac Silicon, and you'll receive special priority support.
###### These Pre-builts are perfect for non-technical users or those who don't have time to, or can't, manually install all the requirements. Just a heads-up: this is an open-source project, so you can also install it manually. This will be 60 days ahead of the open-source version.
###### These Pre-builts are perfect for non-technical users or those who don't have time to, or can't, manually install all the requirements. Just a heads-up: this is an open-source project, so you can also install it manually.
## TLDR; Live Deepfake in just 3 Clicks
![easysteps](https://github.com/user-attachments/assets/af825228-852c-411b-b787-ffd9aac72fc6)
@@ -196,7 +196,7 @@ pip install -r requirements.txt
```bash
pip install -U torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu128
pip uninstall onnxruntime onnxruntime-gpu
pip install onnxruntime-gpu==1.16.3
pip install onnxruntime-gpu==1.21.0
```
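A quick way to confirm that the CUDA-enabled ONNX Runtime is actually visible after this step (a minimal check, assuming the commands above completed without errors):

```python
# Sanity check: list the execution providers ONNX Runtime reports.
# On a working CUDA setup, 'CUDAExecutionProvider' should appear in the output.
import onnxruntime

print(onnxruntime.get_available_providers())
```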
3. Usage:
@@ -304,19 +304,6 @@ python run.py --execution-provider openvino
- Use a screen capture tool like OBS to stream.
- To change the face, select a new source image.
## Tips and Tricks
Check out these helpful guides to get the most out of Deep-Live-Cam:
- [Unlocking the Secrets to the Perfect Deepfake Image](https://deeplivecam.net/index.php/blog/tips-and-tricks/unlocking-the-secrets-to-the-perfect-deepfake-image) - Learn how to create the best deepfake with full head coverage
- [Video Call with DeepLiveCam](https://deeplivecam.net/index.php/blog/tips-and-tricks/video-call-with-deeplivecam) - Make your meetings livelier by using DeepLiveCam with OBS and meeting software
- [Have a Special Guest!](https://deeplivecam.net/index.php/blog/tips-and-tricks/have-a-special-guest) - Tutorial on how to use face mapping to add special guests to your stream
- [Watch Deepfake Movies in Realtime](https://deeplivecam.net/index.php/blog/tips-and-tricks/watch-deepfake-movies-in-realtime) - See yourself star in any video without processing the video
- [Better Quality without Sacrificing Speed](https://deeplivecam.net/index.php/blog/tips-and-tricks/better-quality-without-sacrificing-speed) - Tips for achieving better results without impacting performance
- [Instant Vtuber!](https://deeplivecam.net/index.php/blog/tips-and-tricks/instant-vtuber) - Create a new persona/vtuber easily using Metahuman Creator
Visit our [official blog](https://deeplivecam.net/index.php/blog/tips-and-tricks) for more tips and tutorials.
## Command Line Arguments (Unmaintained)
```
@@ -360,6 +347,8 @@ Looking for a CLI mode? Using the -s/--source argument will make the run program
- [*"This real-time webcam deepfake tool raises alarms about the future of identity theft"*](https://www.diyphotography.net/this-real-time-webcam-deepfake-tool-raises-alarms-about-the-future-of-identity-theft/) - DIYPhotography
- [*"That's Crazy, Oh God. That's Fucking Freaky Dude... That's So Wild Dude"*](https://www.youtube.com/watch?time_continue=1074&v=py4Tc-Y8BcY) - SomeOrdinaryGamers
- [*"Alright look look look, now look chat, we can do any face we want to look like chat"*](https://www.youtube.com/live/mFsCe7AIxq8?feature=shared&t=2686) - IShowSpeed
- [*"They do a pretty good job matching poses, expression and even the lighting"*](https://www.youtube.com/watch?v=wnCghLjqv3s&t=551s) - TechLinked (LTT)
## Credits

Binary file not shown. (Size: 8.7 KiB before, 9.6 KiB after.)

View File

@@ -114,8 +114,46 @@ def encode_execution_providers(execution_providers: List[str]) -> List[str]:
def decode_execution_providers(execution_providers: List[str]) -> List[str]:
    return [provider for provider, encoded_execution_provider in zip(onnxruntime.get_available_providers(), encode_execution_providers(onnxruntime.get_available_providers()))
            if any(execution_provider in encoded_execution_provider for execution_provider in execution_providers)]
    try:
        available_providers = onnxruntime.get_available_providers()
        encoded_available_providers = encode_execution_providers(available_providers)

        selected_providers = []
        unavailable_providers = []

        for execution_provider in execution_providers:
            provider_found = False
            for provider, encoded_provider in zip(available_providers, encoded_available_providers):
                if execution_provider in encoded_provider:
                    selected_providers.append(provider)
                    provider_found = True
                    break
            if not provider_found:
                unavailable_providers.append(execution_provider)

        if 'cuda' in [p.lower() for p in unavailable_providers]:
            # CUDA was requested but not available
            cuda_path = os.environ.get('CUDA_PATH')
            if cuda_path:
                update_status(f"Warning: CUDA_PATH is set ({cuda_path}) but CUDA wasn't able to be loaded. Check your CUDA installation.", "DLC.CORE")
                if os.path.exists(cuda_path):
                    # CUDA path exists but couldn't be loaded - likely missing DLLs or incorrect configuration
                    update_status("CUDA path exists but CUDA libraries couldn't be loaded. Check if the CUDA runtime is properly installed.", "DLC.CORE")
                else:
                    update_status("CUDA_PATH is set but the directory doesn't exist. Check your environment variables.", "DLC.CORE")
            else:
                update_status("CUDA was requested but no CUDA_PATH is set in environment variables.", "DLC.CORE")

        # If no providers were selected, fall back to CPU
        if not selected_providers:
            update_status("Falling back to CPU execution provider.", "DLC.CORE")
            selected_providers = ['CPUExecutionProvider']

        return selected_providers
    except Exception as e:
        update_status(f"Error determining execution providers: {str(e)}. Falling back to CPU.", "DLC.CORE")
        return ['CPUExecutionProvider']
def suggest_max_memory() -> int:
@@ -160,6 +198,56 @@ def release_resources() -> None:
        torch.cuda.empty_cache()


def check_cuda_configuration() -> None:
    """
    Check CUDA configuration and provide diagnostic information.
    This helps users identify issues with their CUDA setup.
    """
    if 'cuda' in [p.lower() for p in encode_execution_providers(modules.globals.execution_providers)]:
        update_status("CUDA execution provider requested, checking configuration...", "DLC.CUDA")

        # Check for CUDA environment variables
        cuda_path = os.environ.get('CUDA_PATH')
        if cuda_path:
            update_status(f"CUDA_PATH is set to: {cuda_path}", "DLC.CUDA")

            # Check if the directory exists
            if os.path.exists(cuda_path):
                update_status("CUDA_PATH directory exists", "DLC.CUDA")

                # Check for critical CUDA DLLs on Windows
                if platform.system().lower() == 'windows':
                    cuda_dll_path = os.path.join(cuda_path, 'bin', 'cudart64_*.dll')
                    import glob
                    cuda_dlls = glob.glob(cuda_dll_path)
                    if cuda_dlls:
                        update_status(f"CUDA Runtime DLLs found: {', '.join(os.path.basename(dll) for dll in cuda_dlls)}", "DLC.CUDA")
                    else:
                        update_status("Warning: No CUDA Runtime DLLs found in CUDA_PATH/bin", "DLC.CUDA")
                        update_status("This may cause CUDA initialization failures", "DLC.CUDA")
            else:
                update_status("Warning: CUDA_PATH is set but directory doesn't exist", "DLC.CUDA")
        else:
            update_status("Warning: CUDA_PATH environment variable is not set", "DLC.CUDA")

        # Check if CUDA is in PATH
        path_env = os.environ.get('PATH', '')
        if cuda_path and cuda_path + '\\bin' in path_env:
            update_status("CUDA bin directory is in PATH", "DLC.CUDA")
        else:
            update_status("Warning: CUDA bin directory not found in PATH", "DLC.CUDA")
            update_status("This may prevent CUDA libraries from being found", "DLC.CUDA")

        # Try CUDA provider availability directly from onnxruntime
        available_providers = onnxruntime.get_available_providers()
        if 'CUDAExecutionProvider' in available_providers:
            update_status("CUDA provider is available in ONNX Runtime", "DLC.CUDA")
        else:
            update_status("Warning: CUDA provider is not available in ONNX Runtime", "DLC.CUDA")
            update_status("Available providers: " + ', '.join(available_providers), "DLC.CUDA")


def pre_check() -> bool:
    if sys.version_info < (3, 9):
        update_status('Python version is not supported - please upgrade to 3.9 or higher.')
@@ -167,6 +255,10 @@ def pre_check() -> bool:
    if not shutil.which('ffmpeg'):
        update_status('ffmpeg is not installed.')
        return False

    # Check CUDA configuration if requested
    check_cuda_configuration()

    return True
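Taken together, the new code degrades gracefully: if a requested provider such as `cuda` is not reported by ONNX Runtime, diagnostics are emitted and the CPU provider is used instead. A standalone sketch of that selection behaviour (a simplified illustration, not the project's code; `update_status` is replaced with `print`, and the name `pick_providers` is hypothetical):

```python
# Simplified illustration of the provider-selection fallback shown above.
import onnxruntime


def pick_providers(requested: list[str]) -> list[str]:
    available = onnxruntime.get_available_providers()
    # Keep only the requested providers that ONNX Runtime actually reports as available.
    selected = [p for p in available if any(r.lower() in p.lower() for r in requested)]
    if not selected:
        print("Falling back to CPU execution provider.")
        selected = ["CPUExecutionProvider"]
    return selected


# Example: on a machine without CUDA, pick_providers(["cuda"]) returns
# ["CPUExecutionProvider"] and prints the fallback notice.
```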

View File

@@ -19,8 +19,26 @@ def get_face_analyser() -> Any:
    global FACE_ANALYSER

    if FACE_ANALYSER is None:
        try:
            FACE_ANALYSER = insightface.app.FaceAnalysis(name='buffalo_l', providers=modules.globals.execution_providers)
            FACE_ANALYSER.prepare(ctx_id=0, det_size=(640, 640))
        except Exception as e:
            error_msg = str(e)
            print(f"[DLC.FACE-ANALYSER] Error initializing face analyser with providers {modules.globals.execution_providers}: {error_msg}")

            # If error is CUDA-related, try with CPU provider as fallback
            if "cuda" in error_msg.lower() or "gpu" in error_msg.lower():
                print("[DLC.FACE-ANALYSER] CUDA error detected. Falling back to CPU provider.")
                modules.globals.execution_providers = ['CPUExecutionProvider']
                try:
                    FACE_ANALYSER = insightface.app.FaceAnalysis(name='buffalo_l', providers=modules.globals.execution_providers)
                    FACE_ANALYSER.prepare(ctx_id=0, det_size=(640, 640))
                    print("[DLC.FACE-ANALYSER] Successfully initialized with CPU provider as fallback.")
                except Exception as fallback_error:
                    print(f"[DLC.FACE-ANALYSER] Failed to initialize even with fallback provider: {str(fallback_error)}")
                    raise
            else:
                raise
    return FACE_ANALYSER

View File

@@ -61,9 +61,29 @@ def get_face_swapper() -> Any:
    with THREAD_LOCK:
        if FACE_SWAPPER is None:
            model_path = os.path.join(models_dir, "inswapper_128_fp16.onnx")
            try:
                FACE_SWAPPER = insightface.model_zoo.get_model(
                    model_path, providers=modules.globals.execution_providers
                )
                update_status(f"Successfully loaded model with providers: {modules.globals.execution_providers}", NAME)
            except Exception as e:
                error_msg = str(e)
                update_status(f"Error loading model with selected providers: {error_msg}", NAME)

                # If the error is related to CUDA, provide more helpful information
                if "cuda" in error_msg.lower() or "gpu" in error_msg.lower():
                    update_status("CUDA error detected. Trying to load with CPU provider instead.", NAME)
                    modules.globals.execution_providers = ['CPUExecutionProvider']
                    try:
                        FACE_SWAPPER = insightface.model_zoo.get_model(
                            model_path, providers=modules.globals.execution_providers
                        )
                        update_status("Successfully loaded model with CPU provider as fallback.", NAME)
                    except Exception as fallback_error:
                        update_status(f"Failed to load model even with fallback provider: {str(fallback_error)}", NAME)
                        raise
                else:
                    raise
    return FACE_SWAPPER
@@ -430,37 +450,24 @@ def draw_mouth_mask_visualization(
        feathered_mask = cv2.GaussianBlur(
            mask_region.astype(float), (kernel_size, kernel_size), 0
        )
        feathered_mask = (feathered_mask / feathered_mask.max() * 255).astype(np.uint8)
        # Remove the feathered mask color overlay
        # color_feathered_mask = cv2.applyColorMap(feathered_mask, cv2.COLORMAP_VIRIDIS)
        feathered_mask = feathered_mask / feathered_mask.max()

        # Ensure shapes match before blending feathered mask
        # if vis_region.shape == color_feathered_mask.shape:
        #     blended_feathered = cv2.addWeighted(vis_region, 0.7, color_feathered_mask, 0.3, 0)
        #     vis_frame[min_y:max_y, min_x:max_x] = blended_feathered
        face_mask_roi = face_mask[min_y:max_y, min_x:max_x]
        combined_mask = feathered_mask * (face_mask_roi / 255.0)

        # Add labels
        cv2.putText(
            vis_frame,
            "Lower Mouth Mask",
            (min_x, min_y - 10),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            (255, 255, 255),
            1,
        )
        cv2.putText(
            vis_frame,
            "Feathered Mask",
            (min_x, max_y + 20),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            (255, 255, 255),
            1,
        )
        combined_mask = combined_mask[:, :, np.newaxis]
        blended = (
            color_corrected_mouth * combined_mask + vis_region * (1 - combined_mask)
        ).astype(np.uint8)

        # Apply face mask to blended result
        face_mask_3channel = (
            np.repeat(face_mask_roi[:, :, np.newaxis], 3, axis=2) / 255.0
        )
        final_blend = blended * face_mask_3channel + vis_region * (1 - face_mask_3channel)

        vis_frame[min_y:max_y, min_x:max_x] = final_blend.astype(np.uint8)

        return vis_frame
    return frame


def apply_mouth_area(