Compare commits

...

14 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| killerlux | 20f3011ce1 | Merge dd622d8837 into d5a3fb0c47 | 2025-05-13 10:05:12 +02:00 |
| Kenneth Estanislao | d5a3fb0c47 | Merge pull request #1268 from jiacheng-0/main (Update `__init__.py`) | 2025-05-13 00:57:09 +08:00 |
| Teo Jia Cheng | 9690070399 | Update `__init__.py` | 2025-05-13 00:14:49 +08:00 |
| Kenneth Estanislao | f3e83b985c | Merge pull request #1210 from KunjShah01/main (Update `__init__.py`) | 2025-05-12 15:14:58 +08:00 |
| Kenneth Estanislao | e3e3638b79 | Merge pull request #1232 from gboeer/patch-1 (Add german localization and fix minor typos) | 2025-05-12 15:14:32 +08:00 |
| Gordon Böer | 75122da389 | Create german localization | 2025-05-07 13:30:22 +02:00 |
| Gordon Böer | 7063bba4b3 | fix typos in zh.json | 2025-05-07 13:24:54 +02:00 |
| Gordon Böer | bdbd7dcfbc | fix typos in ui.py | 2025-05-07 13:23:31 +02:00 |
| KUNJ SHAH | a64940def7 | update | 2025-05-05 13:19:46 +00:00 |
| KUNJ SHAH | fe4a87e8f2 | update | 2025-05-05 13:19:29 +00:00 |
| KUNJ SHAH | 9ecd2dab83 | changes | 2025-05-05 13:10:00 +00:00 |
| KUNJ SHAH | c9f36eb350 | Update `__init__.py` | 2025-05-05 18:29:44 +05:30 |
| Aymen Kouskoussi | dd622d8837 | Add security policy, pull request template, and new issue templates (feature request, question) | 2025-05-03 09:20:13 +02:00 |
| Aymen Kouskoussi | 02ecc245bd | Fix: Improve Linux CPU execution and webcam handling. Update README. | 2025-05-03 09:16:41 +02:00 |
11 changed files with 198 additions and 15 deletions


@ -0,0 +1,17 @@
---
name: Feature Request
about: Suggest an idea for this project
labels: enhancement
---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.


@ -0,0 +1,11 @@
---
name: Question
about: Ask a question or request support
labels: question
---

**Your question**
Please describe your question or what you need help with.

**Context**
Add any other context or details that might help us answer your question.


@ -0,0 +1,22 @@
# Pull Request Template

Thank you for your contribution!
Please fill out the following checklist and provide all relevant information to help us review your pull request.

## Description

Please include a summary of the change and which issue is fixed (if any). Also describe your motivation and context.

Fixes #(issue)

## Checklist

- [ ] My code follows the project style and guidelines
- [ ] I have performed a self-review of my code
- [ ] I have tested the changes and they work as expected
- [ ] I have added tests that prove my fix is effective or that my feature works (if applicable)
- [ ] I have added necessary documentation (if appropriate)

## Additional Information

Please add any other information or screenshots that may help the reviewers.

.github/SECURITY.md

@ -0,0 +1,11 @@
# Security Policy

## Reporting a Vulnerability

If you discover a security vulnerability, please report it by emailing the project maintainers at [your-email@example.com].

- Do **not** create a public issue for security vulnerabilities.
- Provide as much information as possible to help us understand and address the issue quickly.
- We will acknowledge your report within 3 business days and strive to resolve all security issues promptly.

Thank you for helping keep this project and its users safe!


@ -148,6 +148,31 @@ source venv/bin/activate
pip install -r requirements.txt
```
**For Linux (Debian/Ubuntu based):**
```bash
# Install system dependencies (if needed)
sudo apt-get update
sudo apt-get install python3-venv python3-pip ffmpeg git
# Create and activate virtual environment
python3 -m venv venv
source venv/bin/activate
# Install Python dependencies
# (Important: Ensure you have CPU-only versions if not using GPU)
pip uninstall -y torch torchvision torchaudio onnxruntime onnxruntime-gpu
pip install -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cpu
# Install webcam utilities (optional but helpful for troubleshooting)
sudo apt-get install v4l-utils
# Ensure your user is in the 'video' group for webcam access
# (You might need to log out and log back in after adding)
sudo adduser $USER video
groups
```
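To confirm that the CPU-only wheels are actually in use, a quick check like the following can help (this snippet is only an illustrative sanity check, not part of the repository):

```python
import torch
import onnxruntime as ort

print(torch.__version__)              # CPU-only wheels typically report a "+cpu" suffix
print(torch.cuda.is_available())      # expected: False on a CPU-only install
print(ort.get_available_providers())  # expected to include 'CPUExecutionProvider'
```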
**For macOS:**
Apple Silicon (M1/M2/M3) requires specific setup:
@ -181,7 +206,7 @@ source venv/bin/activate
pip install -r requirements.txt
```
**Run:** If you don't have a GPU, you can run Deep-Live-Cam using `python run.py`. Note that initial execution will download models (~300MB).
**Run:** If you don't have a GPU, you can run Deep-Live-Cam using `python run.py` or `python run.py --execution-provider cpu`. Note that initial execution will download models (~300MB). Performance will be very low (potentially < 1 FPS) without a compatible GPU.
### GPU Acceleration

locales/de.json

@ -0,0 +1,46 @@
{
"Source x Target Mapper": "Quelle x Ziel Zuordnung",
"select a source image": "Wähle ein Quellbild",
"Preview": "Vorschau",
"select a target image or video": "Wähle ein Zielbild oder Video",
"save image output file": "Bildausgabedatei speichern",
"save video output file": "Videoausgabedatei speichern",
"select a target image": "Wähle ein Zielbild",
"source": "Quelle",
"Select a target": "Wähle ein Ziel",
"Select a face": "Wähle ein Gesicht",
"Keep audio": "Audio beibehalten",
"Face Enhancer": "Gesichtsverbesserung",
"Many faces": "Mehrere Gesichter",
"Show FPS": "FPS anzeigen",
"Keep fps": "FPS beibehalten",
"Keep frames": "Frames beibehalten",
"Fix Blueish Cam": "Bläuliche Kamera korrigieren",
"Mouth Mask": "Mundmaske",
"Show Mouth Mask Box": "Mundmaskenrahmen anzeigen",
"Start": "Starten",
"Live": "Live",
"Destroy": "Beenden",
"Map faces": "Gesichter zuordnen",
"Processing...": "Verarbeitung läuft...",
"Processing succeed!": "Verarbeitung erfolgreich!",
"Processing ignored!": "Verarbeitung ignoriert!",
"Failed to start camera": "Kamera konnte nicht gestartet werden",
"Please complete pop-up or close it.": "Bitte das Pop-up komplettieren oder schließen.",
"Getting unique faces": "Einzigartige Gesichter erfassen",
"Please select a source image first": "Bitte zuerst ein Quellbild auswählen",
"No faces found in target": "Keine Gesichter im Zielbild gefunden",
"Add": "Hinzufügen",
"Clear": "Löschen",
"Submit": "Absenden",
"Select source image": "Quellbild auswählen",
"Select target image": "Zielbild auswählen",
"Please provide mapping!": "Bitte eine Zuordnung angeben!",
"At least 1 source with target is required!": "Mindestens eine Quelle mit einem Ziel ist erforderlich!",
"At least 1 source with target is required!": "Mindestens eine Quelle mit einem Ziel ist erforderlich!",
"Face could not be detected in last upload!": "Im letzten Upload konnte kein Gesicht erkannt werden!",
"Select Camera:": "Kamera auswählen:",
"All mappings cleared!": "Alle Zuordnungen gelöscht!",
"Mappings successfully submitted!": "Zuordnungen erfolgreich übermittelt!",
"Source x Target Mapper is already open.": "Quell-zu-Ziel-Zuordnung ist bereits geöffnet."
}
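For reference, here is a minimal sketch of how a locale file like this one can back the `_()` helper used in ui.py; the loader below is an illustrative assumption, not the project's actual implementation:

```python
import json

def load_locale(lang: str) -> dict:
    # reads e.g. locales/de.json
    with open(f"locales/{lang}.json", encoding="utf-8") as f:
        return json.load(f)

_translations = load_locale("de")

def _(text: str) -> str:
    # fall back to the English key when no translation is available
    return _translations.get(text, text)

print(_("Preview"))  # -> "Vorschau"
```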


@ -1,11 +1,11 @@
{
"Source x Target Mapper": "Source x Target Mapper",
"select an source image": "选择一个源图像",
"select a source image": "选择一个源图像",
"Preview": "预览",
"select an target image or video": "选择一个目标图像或视频",
"select a target image or video": "选择一个目标图像或视频",
"save image output file": "保存图像输出文件",
"save video output file": "保存视频输出文件",
"select an target image": "选择一个目标图像",
"select a target image": "选择一个目标图像",
"source": "源",
"Select a target": "选择一个目标",
"Select a face": "选择一张脸",
@ -36,11 +36,11 @@
"Select source image": "请选取源图像",
"Select target image": "请选取目标图像",
"Please provide mapping!": "请提供映射",
"Atleast 1 source with target is required!": "至少需要一个来源图像与目标图像相关!",
"At least 1 source with target is required!": "至少需要一个来源图像与目标图像相关!",
"At least 1 source with target is required!": "至少需要一个来源图像与目标图像相关!",
"Face could not be detected in last upload!": "最近上传的图像中没有检测到人脸!",
"Select Camera:": "选择摄像头",
"All mappings cleared!": "所有映射均已清除!",
"Mappings successfully submitted!": "成功提交映射!",
"Source x Target Mapper is already open.": "源 x 目标映射器已打开。"
}
}


@ -0,0 +1,18 @@
import os
import cv2
import numpy as np

# Utility function to support unicode characters in file paths for reading
def imread_unicode(path, flags=cv2.IMREAD_COLOR):
    return cv2.imdecode(np.fromfile(path, dtype=np.uint8), flags)

# Utility function to support unicode characters in file paths for writing
def imwrite_unicode(path, img, params=None):
    # os.path.splitext already keeps the leading dot, e.g. ".png"
    root, ext = os.path.splitext(path)
    if not ext:
        ext = ".png"
    result, encoded_img = cv2.imencode(ext, img, params if params is not None else [])
    if result:
        encoded_img.tofile(path)
        return True
    return False
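A short usage sketch (the paths below are made up for illustration):

```python
# paths containing non-ASCII characters that cv2.imread/imwrite mishandle on Windows
img = imread_unicode("素材/quellbild.png")
if img is not None:
    imwrite_unicode("ausgabe/ergebnis.jpg", img, [cv2.IMWRITE_JPEG_QUALITY, 90])
```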


@ -4,7 +4,11 @@ import sys
if any(arg.startswith('--execution-provider') for arg in sys.argv):
    os.environ['OMP_NUM_THREADS'] = '1'
# reduce tensorflow log level
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# disable GPU for tensorflow when using CPU provider
if '--execution-provider' in sys.argv and 'cpu' in sys.argv[sys.argv.index('--execution-provider') + 1]:
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import warnings
from typing import List
import platform
@ -81,6 +85,13 @@ def parse_args() -> None:
    modules.globals.execution_threads = args.execution_threads
    modules.globals.lang = args.lang

    # If using CPU provider, ensure we're not using any GPU features
    if 'cpu' in args.execution_provider:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
        if torch.cuda.is_available():
            # release any cached GPU memory; torch.cuda.set_device() accepts only
            # CUDA devices, so it must not be called with 'cpu' here
            torch.cuda.empty_cache()

    # for ENHANCER tumbler:
    if 'face_enhancer' in args.frame_processor:
        modules.globals.fp_ui['face_enhancer'] = True
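For context, the value passed to `--execution-provider` ultimately selects the ONNX Runtime execution provider used for inference. A minimal sketch of that mapping (the model path and variable names below are illustrative assumptions, not taken from this diff):

```python
import onnxruntime as ort

# '--execution-provider cpu' corresponds to ONNX Runtime's CPUExecutionProvider
providers = ['CPUExecutionProvider']
session = ort.InferenceSession("models/inswapper_128.onnx", providers=providers)
print(session.get_providers())  # confirms which providers are actually active
```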


@ -429,7 +429,7 @@ def create_source_target_popup(
POPUP.destroy()
select_output_path(start)
else:
update_pop_status("Atleast 1 source with target is required!")
update_pop_status("At least 1 source with target is required!")
scrollable_frame = ctk.CTkScrollableFrame(
POPUP, width=POPUP_SCROLL_WIDTH, height=POPUP_SCROLL_HEIGHT
@ -489,7 +489,7 @@ def update_popup_source(
global source_label_dict
source_path = ctk.filedialog.askopenfilename(
title=_("select an source image"),
title=_("select a source image"),
initialdir=RECENT_DIRECTORY_SOURCE,
filetypes=[img_ft],
)
@ -584,7 +584,7 @@ def select_source_path() -> None:
PREVIEW.withdraw()
source_path = ctk.filedialog.askopenfilename(
title=_("select an source image"),
title=_("select a source image"),
initialdir=RECENT_DIRECTORY_SOURCE,
filetypes=[img_ft],
)
@ -627,7 +627,7 @@ def select_target_path() -> None:
PREVIEW.withdraw()
target_path = ctk.filedialog.askopenfilename(
title=_("select an target image or video"),
title=_("select a target image or video"),
initialdir=RECENT_DIRECTORY_TARGET,
filetypes=[img_ft, vid_ft],
)
@ -1108,7 +1108,7 @@ def update_webcam_source(
global source_label_dict_live
source_path = ctk.filedialog.askopenfilename(
title=_("select an source image"),
title=_("select a source image"),
initialdir=RECENT_DIRECTORY_SOURCE,
filetypes=[img_ft],
)
@ -1160,7 +1160,7 @@ def update_webcam_target(
global target_label_dict_live
target_path = ctk.filedialog.askopenfilename(
title=_("select an target image"),
title=_("select a target image"),
initialdir=RECENT_DIRECTORY_SOURCE,
filetypes=[img_ft],
)


@ -42,15 +42,31 @@ class VideoCapturer:
for dev_id, backend in capture_methods:
try:
print(f"Trying device {dev_id} with backend {backend}")
self.cap = cv2.VideoCapture(dev_id, backend)
if self.cap.isOpened():
print(f"Successfully opened device {dev_id} with backend {backend}")
break
self.cap.release()
except Exception:
except Exception as e:
print(f"Failed to open device {dev_id} with backend {backend}: {str(e)}")
continue
else:
# Unix-like systems (Linux/Mac) capture method
self.cap = cv2.VideoCapture(self.device_index)
# Try device 0 first, then the specified device index if different
capture_methods = [(0, cv2.CAP_V4L2), (self.device_index, cv2.CAP_V4L2)] if self.device_index != 0 else [(0, cv2.CAP_V4L2)]
for dev_id, backend in capture_methods:
try:
print(f"Trying device {dev_id} with backend {backend}")
self.cap = cv2.VideoCapture(dev_id, backend)
if self.cap.isOpened():
print(f"Successfully opened device {dev_id} with backend {backend}")
break
self.cap.release()
except Exception as e:
print(f"Failed to open device {dev_id} with backend {backend}: {str(e)}")
continue
if not self.cap or not self.cap.isOpened():
raise RuntimeError("Failed to open camera")
@ -60,6 +76,12 @@ class VideoCapturer:
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
self.cap.set(cv2.CAP_PROP_FPS, fps)
# Print actual camera settings
actual_width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
actual_height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
actual_fps = self.cap.get(cv2.CAP_PROP_FPS)
print(f"Camera initialized with: {actual_width}x{actual_height} @ {actual_fps}fps")
self.is_running = True
return True
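If the camera still fails to open, it can help to probe which indices OpenCV can actually access. A minimal sketch (Linux with the V4L2 backend assumed; the range of indices is arbitrary):

```python
import cv2

# probe the first few /dev/video* indices with the V4L2 backend
for index in range(4):
    cap = cv2.VideoCapture(index, cv2.CAP_V4L2)
    if cap.isOpened():
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        print(f"Camera {index}: available ({width}x{height})")
    cap.release()
```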