Merge branch 'main' into new_feature

pull/1340/head
Christoph9211 2025-06-08 17:04:56 -05:00
commit b31a3dc2bd
19 changed files with 614 additions and 188 deletions


@ -30,6 +30,13 @@ By using this software, you agree to these terms and commit to using it in a man
Users are expected to use this software responsibly and legally. If using a real person's face, obtain their consent and clearly label any output as a deepfake when sharing online. We are not responsible for end-user actions.
## Exclusive v2.0 Quick Start - Pre-built (Windows)
<a href="https://deeplivecam.net/index.php/quickstart"> <img src="media/Download.png" width="285" height="77" /> </a>
##### This is the fastest build you can get if you have a discrete NVIDIA or AMD GPU.
###### These pre-builds are ideal for non-technical users and for anyone who doesn't have the time, or the ability, to install all the requirements manually. Just a heads-up: this is an open-source project, so you can also install it manually. The pre-built version is 60 days ahead of the open-source version.
## TLDR; Live Deepfake in just 3 Clicks
![easysteps](https://github.com/user-attachments/assets/af825228-852c-411b-b787-ffd9aac72fc6)
@ -126,12 +133,20 @@ Place these files in the "**models**" folder.
We highly recommend using a `venv` to avoid issues.
For Windows:
```bash
python -m venv venv
venv\Scripts\activate
pip install -r requirements.txt
```
For Linux:
```bash
# Ensure you use the installed Python 3.10
python3 -m venv venv
source venv/bin/activate
pip install -r requirements.txt
```
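A quick optional sanity check (a sketch, not an official step) that the activated venv really runs the installed Python 3.10:
```python
# Run inside the activated venv; fails loudly if the interpreter is not Python 3.10
import sys

assert sys.version_info[:2] == (3, 10), f"expected Python 3.10, got {sys.version}"
print(sys.version)
```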
**For macOS:**
@ -172,10 +187,14 @@ pip install -r requirements.txt
**CUDA Execution Provider (Nvidia)**
1. Install [CUDA Toolkit 11.8.0](https://developer.nvidia.com/cuda-11-8-0-download-archive)
2. Install dependencies:
1. Install [CUDA Toolkit 12.8.0](https://developer.nvidia.com/cuda-12-8-0-download-archive)
2. Install [cuDNN v8.9.7 for CUDA 12.x](https://developer.nvidia.com/rdp/cudnn-archive) (required for onnxruntime-gpu):
- Download cuDNN v8.9.7 for CUDA 12.x
- Make sure the cuDNN bin directory is in your system PATH
3. Install dependencies:
```bash
pip install -U torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu128
pip uninstall onnxruntime onnxruntime-gpu
pip install onnxruntime-gpu==1.16.3
```
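Optionally, verify that the GPU stack is visible from the active venv (a minimal sketch; assumes the steps above completed without errors):
```python
# Confirm PyTorch sees CUDA and that ONNX Runtime exposes the CUDA execution provider
import torch
import onnxruntime as ort

print(torch.cuda.is_available())       # expect True on a working CUDA install
print(ort.get_available_providers())   # expect 'CUDAExecutionProvider' in this list
```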

locales/de.json (new file, mode 100644)

@ -0,0 +1,46 @@
{
"Source x Target Mapper": "Quelle x Ziel Zuordnung",
"select a source image": "Wähle ein Quellbild",
"Preview": "Vorschau",
"select a target image or video": "Wähle ein Zielbild oder Video",
"save image output file": "Bildausgabedatei speichern",
"save video output file": "Videoausgabedatei speichern",
"select a target image": "Wähle ein Zielbild",
"source": "Quelle",
"Select a target": "Wähle ein Ziel",
"Select a face": "Wähle ein Gesicht",
"Keep audio": "Audio beibehalten",
"Face Enhancer": "Gesichtsverbesserung",
"Many faces": "Mehrere Gesichter",
"Show FPS": "FPS anzeigen",
"Keep fps": "FPS beibehalten",
"Keep frames": "Frames beibehalten",
"Fix Blueish Cam": "Bläuliche Kamera korrigieren",
"Mouth Mask": "Mundmaske",
"Show Mouth Mask Box": "Mundmaskenrahmen anzeigen",
"Start": "Starten",
"Live": "Live",
"Destroy": "Beenden",
"Map faces": "Gesichter zuordnen",
"Processing...": "Verarbeitung läuft...",
"Processing succeed!": "Verarbeitung erfolgreich!",
"Processing ignored!": "Verarbeitung ignoriert!",
"Failed to start camera": "Kamera konnte nicht gestartet werden",
"Please complete pop-up or close it.": "Bitte das Pop-up komplettieren oder schließen.",
"Getting unique faces": "Einzigartige Gesichter erfassen",
"Please select a source image first": "Bitte zuerst ein Quellbild auswählen",
"No faces found in target": "Keine Gesichter im Zielbild gefunden",
"Add": "Hinzufügen",
"Clear": "Löschen",
"Submit": "Absenden",
"Select source image": "Quellbild auswählen",
"Select target image": "Zielbild auswählen",
"Please provide mapping!": "Bitte eine Zuordnung angeben!",
"At least 1 source with target is required!": "Mindestens eine Quelle mit einem Ziel ist erforderlich!",
"At least 1 source with target is required!": "Mindestens eine Quelle mit einem Ziel ist erforderlich!",
"Face could not be detected in last upload!": "Im letzten Upload konnte kein Gesicht erkannt werden!",
"Select Camera:": "Kamera auswählen:",
"All mappings cleared!": "Alle Zuordnungen gelöscht!",
"Mappings successfully submitted!": "Zuordnungen erfolgreich übermittelt!",
"Source x Target Mapper is already open.": "Quell-zu-Ziel-Zuordnung ist bereits geöffnet."
}

locales/es.json (new file, mode 100644)

@ -0,0 +1,46 @@
{
"Source x Target Mapper": "Mapeador de fuente x destino",
"select a source image": "Seleccionar imagen fuente",
"Preview": "Vista previa",
"select a target image or video": "elegir un video o una imagen fuente",
"save image output file": "guardar imagen final",
"save video output file": "guardar video final",
"select a target image": "elegir una imagen objetiva",
"source": "fuente",
"Select a target": "Elegir un destino",
"Select a face": "Elegir una cara",
"Keep audio": "Mantener audio original",
"Face Enhancer": "Potenciador de caras",
"Many faces": "Varias caras",
"Show FPS": "Mostrar fps",
"Keep fps": "Mantener fps",
"Keep frames": "Mantener frames",
"Fix Blueish Cam": "Corregir tono azul de video",
"Mouth Mask": "Máscara de boca",
"Show Mouth Mask Box": "Mostrar área de la máscara de boca",
"Start": "Iniciar",
"Live": "En vivo",
"Destroy": "Borrar",
"Map faces": "Mapear caras",
"Processing...": "Procesando...",
"Processing succeed!": "¡Proceso terminado con éxito!",
"Processing ignored!": "¡Procesamiento omitido!",
"Failed to start camera": "No se pudo iniciar la cámara",
"Please complete pop-up or close it.": "Complete o cierre el pop-up",
"Getting unique faces": "Buscando caras únicas",
"Please select a source image first": "Primero, seleccione una imagen fuente",
"No faces found in target": "No se encontró una cara en el destino",
"Add": "Agregar",
"Clear": "Limpiar",
"Submit": "Enviar",
"Select source image": "Seleccionar imagen fuente",
"Select target image": "Seleccionar imagen destino",
"Please provide mapping!": "Por favor, proporcione un mapeo",
"At least 1 source with target is required!": "Se requiere al menos una fuente con un destino.",
"At least 1 source with target is required!": "Se requiere al menos una fuente con un destino.",
"Face could not be detected in last upload!": "¡No se pudo encontrar una cara en el último video o imagen!",
"Select Camera:": "Elegir cámara:",
"All mappings cleared!": "¡Todos los mapeos fueron borrados!",
"Mappings successfully submitted!": "Mapeos enviados con éxito!",
"Source x Target Mapper is already open.": "El mapeador de fuente x destino ya está abierto."
}

locales/fi.json (new file, mode 100644)

@ -0,0 +1,46 @@
{
"Source x Target Mapper": "Source x Target Kartoitin",
"select an source image": "Valitse lähde kuva",
"Preview": "Esikatsele",
"select an target image or video": "Valitse kohde kuva tai video",
"save image output file": "tallenna kuva",
"save video output file": "tallenna video",
"select an target image": "Valitse kohde kuva",
"source": "lähde",
"Select a target": "Valitse kohde",
"Select a face": "Valitse kasvot",
"Keep audio": "Säilytä ääni",
"Face Enhancer": "Kasvojen Parantaja",
"Many faces": "Useampia kasvoja",
"Show FPS": "Näytä FPS",
"Keep fps": "Säilytä FPS",
"Keep frames": "Säilytä ruudut",
"Fix Blueish Cam": "Korjaa Sinertävä Kamera",
"Mouth Mask": "Suu Maski",
"Show Mouth Mask Box": "Näytä Suu Maski Laatiko",
"Start": "Aloita",
"Live": "Live",
"Destroy": "Tuhoa",
"Map faces": "Kartoita kasvot",
"Processing...": "Prosessoi...",
"Processing succeed!": "Prosessointi onnistui!",
"Processing ignored!": "Prosessointi lopetettu!",
"Failed to start camera": "Kameran käynnistäminen epäonnistui",
"Please complete pop-up or close it.": "Viimeistele tai sulje ponnahdusikkuna",
"Getting unique faces": "Hankitaan uniikkeja kasvoja",
"Please select a source image first": "Valitse ensin lähde kuva",
"No faces found in target": "Kasvoja ei löydetty kohteessa",
"Add": "Lisää",
"Clear": "Tyhjennä",
"Submit": "Lähetä",
"Select source image": "Valitse lähde kuva",
"Select target image": "Valitse kohde kuva",
"Please provide mapping!": "Tarjoa kartoitus!",
"Atleast 1 source with target is required!": "Vähintään 1 lähde kohteen kanssa on vaadittu!",
"At least 1 source with target is required!": "Vähintään 1 lähde kohteen kanssa on vaadittu!",
"Face could not be detected in last upload!": "Kasvoja ei voitu tunnistaa edellisessä latauksessa!",
"Select Camera:": "Valitse Kamera:",
"All mappings cleared!": "Kaikki kartoitukset tyhjennetty!",
"Mappings successfully submitted!": "Kartoitukset lähetety onnistuneesti!",
"Source x Target Mapper is already open.": "Lähde x Kohde Kartoittaja on jo auki."
}

locales/km.json (new file, mode 100644)

@ -0,0 +1,45 @@
{
"Source x Target Mapper": "ប្រភប x បន្ថែម Mapper",
"select a source image": "ជ្រើសរើសប្រភពរូបភាព",
"Preview": "បង្ហាញ",
"select a target image or video": "ជ្រើសរើសគោលដៅរូបភាពឬវីដេអូ",
"save image output file": "រក្សាទុកលទ្ធផលឯកសាររូបភាព",
"save video output file": "រក្សាទុកលទ្ធផលឯកសារវីដេអូ",
"select a target image": "ជ្រើសរើសគោលដៅរូបភាព",
"source": "ប្រភព",
"Select a target": "ជ្រើសរើសគោលដៅ",
"Select a face": "ជ្រើសរើសមុខ",
"Keep audio": "រម្លងសម្លេង",
"Face Enhancer": "ឧបករណ៍ពង្រឹងមុខ",
"Many faces": "ទម្រង់មុខច្រើន",
"Show FPS": "បង្ហាញ FPS",
"Keep fps": "រម្លង fps",
"Keep frames": "រម្លងទម្រង់",
"Fix Blueish Cam": "ជួសជុល Cam Blueish",
"Mouth Mask": "របាំងមាត់",
"Show Mouth Mask Box": "បង្ហាញប្រអប់របាំងមាត់",
"Start": "ចាប់ផ្ដើម",
"Live": "ផ្សាយផ្ទាល់",
"Destroy": "លុប",
"Map faces": "ផែនទីមុខ",
"Processing...": "កំពុងដំណើរការ...",
"Processing succeed!": "ការដំណើរការទទួលបានជោគជ័យ!",
"Processing ignored!": "ការដំណើរការមិនទទួលបានជោគជ័យ!",
"Failed to start camera": "បរាជ័យដើម្បីចាប់ផ្ដើមបើកកាមេរ៉ា",
"Please complete pop-up or close it.": "សូមបញ្ចប់ផ្ទាំងផុស ឬបិទវា.",
"Getting unique faces": "ការចាប់ផ្ដើមទម្រង់មុខប្លែក",
"Please select a source image first": "សូមជ្រើសរើសប្រភពរូបភាពដំបូង",
"No faces found in target": "រកអត់ឃើញមុខនៅក្នុងគោលដៅ",
"Add": "បន្ថែម",
"Clear": "សម្អាត",
"Submit": "បញ្ចូន",
"Select source image": "ជ្រើសរើសប្រភពរូបភាព",
"Select target image": "ជ្រើសរើសគោលដៅរូបភាព",
"Please provide mapping!": "សូមផ្ដល់នៅផែនទី",
"At least 1 source with target is required!": "ត្រូវការប្រភពយ៉ាងហោចណាស់ ១ ដែលមានគោលដៅ!",
"Face could not be detected in last upload!": "មុខមិនអាចភ្ជាប់នៅក្នុងការបង្ហេាះចុងក្រោយ!",
"Select Camera:": "ជ្រើសរើសកាមេរ៉ា",
"All mappings cleared!": "ផែនទីទាំងអស់ត្រូវបានសម្អាត!",
"Mappings successfully submitted!": "ផែនទីត្រូវបានបញ្ជូនជោគជ័យ!",
"Source x Target Mapper is already open.": "ប្រភព x Target Mapper បានបើករួចហើយ។"
}

locales/ko.json (new file, mode 100644)

@ -0,0 +1,45 @@
{
"Source x Target Mapper": "소스 x 타겟 매퍼",
"select a source image": "소스 이미지 선택",
"Preview": "미리보기",
"select a target image or video": "타겟 이미지 또는 영상 선택",
"save image output file": "이미지 출력 파일 저장",
"save video output file": "영상 출력 파일 저장",
"select a target image": "타겟 이미지 선택",
"source": "소스",
"Select a target": "타겟 선택",
"Select a face": "얼굴 선택",
"Keep audio": "오디오 유지",
"Face Enhancer": "얼굴 향상",
"Many faces": "여러 얼굴",
"Show FPS": "FPS 표시",
"Keep fps": "FPS 유지",
"Keep frames": "프레임 유지",
"Fix Blueish Cam": "푸른빛 카메라 보정",
"Mouth Mask": "입 마스크",
"Show Mouth Mask Box": "입 마스크 박스 표시",
"Start": "시작",
"Live": "라이브",
"Destroy": "종료",
"Map faces": "얼굴 매핑",
"Processing...": "처리 중...",
"Processing succeed!": "처리 성공!",
"Processing ignored!": "처리 무시됨!",
"Failed to start camera": "카메라 시작 실패",
"Please complete pop-up or close it.": "팝업을 완료하거나 닫아주세요.",
"Getting unique faces": "고유 얼굴 가져오는 중",
"Please select a source image first": "먼저 소스 이미지를 선택해주세요",
"No faces found in target": "타겟에서 얼굴을 찾을 수 없음",
"Add": "추가",
"Clear": "지우기",
"Submit": "제출",
"Select source image": "소스 이미지 선택",
"Select target image": "타겟 이미지 선택",
"Please provide mapping!": "매핑을 입력해주세요!",
"At least 1 source with target is required!": "최소 하나의 소스와 타겟이 필요합니다!",
"Face could not be detected in last upload!": "최근 업로드에서 얼굴을 감지할 수 없습니다!",
"Select Camera:": "카메라 선택:",
"All mappings cleared!": "모든 매핑이 삭제되었습니다!",
"Mappings successfully submitted!": "매핑이 성공적으로 제출되었습니다!",
"Source x Target Mapper is already open.": "소스 x 타겟 매퍼가 이미 열려 있습니다."
}

locales/pt-br.json (new file, mode 100644)

@ -0,0 +1,46 @@
{
"Source x Target Mapper": "Mapeador de Origem x Destino",
"select an source image": "Escolha uma imagem de origem",
"Preview": "Prévia",
"select an target image or video": "Escolha uma imagem ou vídeo de destino",
"save image output file": "Salvar imagem final",
"save video output file": "Salvar vídeo final",
"select an target image": "Escolha uma imagem de destino",
"source": "Origem",
"Select a target": "Escolha o destino",
"Select a face": "Escolha um rosto",
"Keep audio": "Manter o áudio original",
"Face Enhancer": "Melhorar rosto",
"Many faces": "Vários rostos",
"Show FPS": "Mostrar FPS",
"Keep fps": "Manter FPS",
"Keep frames": "Manter frames",
"Fix Blueish Cam": "Corrigir tom azulado da câmera",
"Mouth Mask": "Máscara da boca",
"Show Mouth Mask Box": "Mostrar área da máscara da boca",
"Start": "Começar",
"Live": "Ao vivo",
"Destroy": "Destruir",
"Map faces": "Mapear rostos",
"Processing...": "Processando...",
"Processing succeed!": "Tudo certo!",
"Processing ignored!": "Processamento ignorado!",
"Failed to start camera": "Não foi possível iniciar a câmera",
"Please complete pop-up or close it.": "Finalize ou feche o pop-up",
"Getting unique faces": "Buscando rostos diferentes",
"Please select a source image first": "Selecione primeiro uma imagem de origem",
"No faces found in target": "Nenhum rosto encontrado na imagem de destino",
"Add": "Adicionar",
"Clear": "Limpar",
"Submit": "Enviar",
"Select source image": "Escolha a imagem de origem",
"Select target image": "Escolha a imagem de destino",
"Please provide mapping!": "Você precisa realizar o mapeamento!",
"Atleast 1 source with target is required!": "É necessária pelo menos uma origem com um destino!",
"At least 1 source with target is required!": "É necessária pelo menos uma origem com um destino!",
"Face could not be detected in last upload!": "Não conseguimos detectar o rosto na última imagem!",
"Select Camera:": "Escolher câmera:",
"All mappings cleared!": "Todos os mapeamentos foram removidos!",
"Mappings successfully submitted!": "Mapeamentos enviados com sucesso!",
"Source x Target Mapper is already open.": "O Mapeador de Origem x Destino já está aberto."
}

locales/ru.json (new file, mode 100644)

@ -0,0 +1,45 @@
{
"Source x Target Mapper": "Сопоставитель Источник x Цель",
"select a source image": "выберите исходное изображение",
"Preview": "Предпросмотр",
"select a target image or video": "выберите целевое изображение или видео",
"save image output file": "сохранить выходной файл изображения",
"save video output file": "сохранить выходной файл видео",
"select a target image": "выберите целевое изображение",
"source": "источник",
"Select a target": "Выберите целевое изображение",
"Select a face": "Выберите лицо",
"Keep audio": "Сохранить аудио",
"Face Enhancer": "Улучшение лица",
"Many faces": "Несколько лиц",
"Show FPS": "Показать FPS",
"Keep fps": "Сохранить FPS",
"Keep frames": "Сохранить кадры",
"Fix Blueish Cam": "Исправить синеву камеры",
"Mouth Mask": "Маска рта",
"Show Mouth Mask Box": "Показать рамку маски рта",
"Start": "Старт",
"Live": "В реальном времени",
"Destroy": "Остановить",
"Map faces": "Сопоставить лица",
"Processing...": "Обработка...",
"Processing succeed!": "Обработка успешна!",
"Processing ignored!": "Обработка проигнорирована!",
"Failed to start camera": "Не удалось запустить камеру",
"Please complete pop-up or close it.": "Пожалуйста, заполните всплывающее окно или закройте его.",
"Getting unique faces": "Получение уникальных лиц",
"Please select a source image first": "Сначала выберите исходное изображение, пожалуйста",
"No faces found in target": "В целевом изображении не найдено лиц",
"Add": "Добавить",
"Clear": "Очистить",
"Submit": "Отправить",
"Select source image": "Выбрать исходное изображение",
"Select target image": "Выбрать целевое изображение",
"Please provide mapping!": "Пожалуйста, укажите сопоставление!",
"At least 1 source with target is required!": "Требуется хотя бы 1 источник с целью!",
"Face could not be detected in last upload!": "Лицо не обнаружено в последнем загруженном изображении!",
"Select Camera:": "Выберите камеру:",
"All mappings cleared!": "Все сопоставления очищены!",
"Mappings successfully submitted!": "Сопоставления успешно отправлены!",
"Source x Target Mapper is already open.": "Сопоставитель Источник-Цель уже открыт."
}

locales/th.json (new file, mode 100644)

@ -0,0 +1,45 @@
{
"Source x Target Mapper": "ตัวจับคู่ต้นทาง x ปลายทาง",
"select a source image": "เลือกรูปภาพต้นฉบับ",
"Preview": "ตัวอย่าง",
"select a target image or video": "เลือกรูปภาพหรือวิดีโอเป้าหมาย",
"save image output file": "บันทึกไฟล์รูปภาพ",
"save video output file": "บันทึกไฟล์วิดีโอ",
"select a target image": "เลือกรูปภาพเป้าหมาย",
"source": "ต้นฉบับ",
"Select a target": "เลือกเป้าหมาย",
"Select a face": "เลือกใบหน้า",
"Keep audio": "เก็บเสียง",
"Face Enhancer": "ปรับปรุงใบหน้า",
"Many faces": "หลายใบหน้า",
"Show FPS": "แสดง FPS",
"Keep fps": "คงค่า FPS",
"Keep frames": "คงค่าเฟรม",
"Fix Blueish Cam": "แก้ไขภาพอมฟ้าจากกล้อง",
"Mouth Mask": "มาสก์ปาก",
"Show Mouth Mask Box": "แสดงกรอบมาสก์ปาก",
"Start": "เริ่ม",
"Live": "สด",
"Destroy": "หยุด",
"Map faces": "จับคู่ใบหน้า",
"Processing...": "กำลังประมวลผล...",
"Processing succeed!": "ประมวลผลสำเร็จแล้ว!",
"Processing ignored!": "การประมวลผลถูกละเว้น",
"Failed to start camera": "ไม่สามารถเริ่มกล้องได้",
"Please complete pop-up or close it.": "โปรดดำเนินการในป๊อปอัปให้เสร็จสิ้น หรือปิด",
"Getting unique faces": "กำลังค้นหาใบหน้าที่ไม่ซ้ำกัน",
"Please select a source image first": "โปรดเลือกภาพต้นฉบับก่อน",
"No faces found in target": "ไม่พบใบหน้าในภาพเป้าหมาย",
"Add": "เพิ่ม",
"Clear": "ล้าง",
"Submit": "ส่ง",
"Select source image": "เลือกภาพต้นฉบับ",
"Select target image": "เลือกภาพเป้าหมาย",
"Please provide mapping!": "โปรดระบุการจับคู่!",
"At least 1 source with target is required!": "ต้องมีการจับคู่ต้นฉบับกับเป้าหมายอย่างน้อย 1 คู่!",
"Face could not be detected in last upload!": "ไม่สามารถตรวจพบใบหน้าในไฟล์อัปโหลดล่าสุด!",
"Select Camera:": "เลือกกล้อง:",
"All mappings cleared!": "ล้างการจับคู่ทั้งหมดแล้ว!",
"Mappings successfully submitted!": "ส่งการจับคู่สำเร็จแล้ว!",
"Source x Target Mapper is already open.": "ตัวจับคู่ต้นทาง x ปลายทาง เปิดอยู่แล้ว"
}


@ -1,11 +1,11 @@
{
"Source x Target Mapper": "Source x Target Mapper",
"select an source image": "选择一个源图像",
"select a source image": "选择一个源图像",
"Preview": "预览",
"select an target image or video": "选择一个目标图像或视频",
"select a target image or video": "选择一个目标图像或视频",
"save image output file": "保存图像输出文件",
"save video output file": "保存视频输出文件",
"select an target image": "选择一个目标图像",
"select a target image": "选择一个目标图像",
"source": "源",
"Select a target": "选择一个目标",
"Select a face": "选择一张脸",
@ -36,11 +36,11 @@
"Select source image": "请选取源图像",
"Select target image": "请选取目标图像",
"Please provide mapping!": "请提供映射",
"Atleast 1 source with target is required!": "至少需要一个来源图像与目标图像相关!",
"At least 1 source with target is required!": "至少需要一个来源图像与目标图像相关!",
"At least 1 source with target is required!": "至少需要一个来源图像与目标图像相关!",
"Face could not be detected in last upload!": "最近上传的图像中没有检测到人脸!",
"Select Camera:": "选择摄像头",
"All mappings cleared!": "所有映射均已清除!",
"Mappings successfully submitted!": "成功提交映射!",
"Source x Target Mapper is already open.": "源 x 目标映射器已打开。"
}

media/Download.png (binary, mode 100644): 9.0 KiB → 8.7 KiB, contents not shown


@ -0,0 +1,18 @@
import os
import cv2
import numpy as np
# Utility function to support unicode characters in file paths for reading
def imread_unicode(path, flags=cv2.IMREAD_COLOR):
    return cv2.imdecode(np.fromfile(path, dtype=np.uint8), flags)

# Utility function to support unicode characters in file paths for writing
def imwrite_unicode(path, img, params=None):
    root, ext = os.path.splitext(path)
    if not ext:
        ext = ".png"
    # cv2.imencode expects the extension with its leading dot, which os.path.splitext preserves
    result, encoded_img = cv2.imencode(ext, img, params if params is not None else [])
    if result:
        encoded_img.tofile(path)
        return True
    return False
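A minimal usage sketch for these helpers (the paths below are hypothetical; it assumes the two functions above are in scope):
```python
# Hypothetical round trip through paths containing non-ASCII characters
frame = imread_unicode("C:/Users/José/входное_фото.png")
if frame is not None:
    ok = imwrite_unicode("C:/Users/José/результат.png", frame)
    print("saved" if ok else "encode failed")
```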


@ -1,3 +1,3 @@
name = 'Deep-Live-Cam'
version = '1.9'
version = '1.8'
edition = 'GitHub Edition'


@ -43,18 +43,29 @@ def get_frame_processors_modules(frame_processors: List[str]) -> List[ModuleType
def set_frame_processors_modules_from_ui(frame_processors: List[str]) -> None:
global FRAME_PROCESSORS_MODULES
current_processor_names = [proc.__name__.split('.')[-1] for proc in FRAME_PROCESSORS_MODULES]
for frame_processor, state in modules.globals.fp_ui.items():
if state == True and frame_processor not in frame_processors:
frame_processor_module = load_frame_processor_module(frame_processor)
FRAME_PROCESSORS_MODULES.append(frame_processor_module)
modules.globals.frame_processors.append(frame_processor)
if state == False:
if state == True and frame_processor not in current_processor_names:
try:
frame_processor_module = load_frame_processor_module(frame_processor)
FRAME_PROCESSORS_MODULES.remove(frame_processor_module)
modules.globals.frame_processors.remove(frame_processor)
except:
pass
FRAME_PROCESSORS_MODULES.append(frame_processor_module)
if frame_processor not in modules.globals.frame_processors:
modules.globals.frame_processors.append(frame_processor)
except SystemExit:
print(f"Warning: Failed to load frame processor {frame_processor} requested by UI state.")
except Exception as e:
print(f"Warning: Error loading frame processor {frame_processor} requested by UI state: {e}")
elif state == False and frame_processor in current_processor_names:
try:
module_to_remove = next((mod for mod in FRAME_PROCESSORS_MODULES if mod.__name__.endswith(f'.{frame_processor}')), None)
if module_to_remove:
FRAME_PROCESSORS_MODULES.remove(module_to_remove)
if frame_processor in modules.globals.frame_processors:
modules.globals.frame_processors.remove(frame_processor)
except Exception as e:
print(f"Warning: Error removing frame processor {frame_processor}: {e}")
def multi_process_frame(source_path: str, temp_frame_paths: List[str], process_frames: Callable[[str, List[str], Any], None], progress: Any = None) -> None:
with ThreadPoolExecutor(max_workers=modules.globals.execution_threads) as executor:


@ -48,6 +48,17 @@ def pre_start() -> bool:
return True
TENSORRT_AVAILABLE = False
try:
import torch_tensorrt
TENSORRT_AVAILABLE = True
except ImportError as im:
print(f"TensorRT is not available: {im}")
pass
except Exception as e:
print(f"TensorRT is not available: {e}")
pass
def get_face_enhancer() -> Any:
global FACE_ENHANCER
@ -55,16 +66,26 @@ def get_face_enhancer() -> Any:
if FACE_ENHANCER is None:
model_path = os.path.join(models_dir, "GFPGANv1.4.pth")
match platform.system():
case "Darwin": # Mac OS
if torch.backends.mps.is_available():
mps_device = torch.device("mps")
FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1, device=mps_device) # type: ignore[attr-defined]
else:
FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1) # type: ignore[attr-defined]
case _: # Other OS
FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1) # type: ignore[attr-defined]
selected_device = None
device_priority = []
if TENSORRT_AVAILABLE and torch.cuda.is_available():
selected_device = torch.device("cuda")
device_priority.append("TensorRT+CUDA")
elif torch.cuda.is_available():
selected_device = torch.device("cuda")
device_priority.append("CUDA")
elif torch.backends.mps.is_available() and platform.system() == "Darwin":
selected_device = torch.device("mps")
device_priority.append("MPS")
elif not torch.cuda.is_available():
selected_device = torch.device("cpu")
device_priority.append("CPU")
FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1, device=selected_device)
# for debug:
print(f"Selected device: {selected_device} and device priority: {device_priority}")
return FACE_ENHANCER


@ -1,44 +1,56 @@
import os # <-- Added for os.path.exists
from typing import Any, List
import cv2
import insightface
import threading
import numpy as np
import modules.globals
import logging
import modules.processors.frame.core
# Ensure update_status is imported if not already globally accessible
# If it's part of modules.core, it might already be accessible via modules.core.update_status
from modules.core import update_status
from modules.face_analyser import get_one_face, get_many_faces, default_source_face
from modules.typing import Face, Frame
from modules.utilities import conditional_download, resolve_relative_path, is_image, is_video
from modules.utilities import (
conditional_download,
is_image,
is_video,
)
from modules.cluster_analysis import find_closest_centroid
import os
FACE_SWAPPER = None
THREAD_LOCK = threading.Lock()
NAME = 'DLC.FACE-SWAPPER'
NAME = "DLC.FACE-SWAPPER"
abs_dir = os.path.dirname(os.path.abspath(__file__))
models_dir = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(abs_dir))), "models"
)
def pre_check() -> bool:
download_directory_path = resolve_relative_path('../models')
# Ensure both models are mentioned or downloaded if necessary
# Conditional download might need adjustment if you want it to fetch FP32 too
conditional_download(download_directory_path, ['https://huggingface.co/hacksider/deep-live-cam/blob/main/inswapper_128_fp16.onnx'])
# Add a check or download for the FP32 model if you have a URL
# conditional_download(download_directory_path, ['URL_TO_FP32_MODEL_HERE'])
download_directory_path = abs_dir
conditional_download(
download_directory_path,
[
"https://huggingface.co/hacksider/deep-live-cam/blob/main/inswapper_128_fp16.onnx"
],
)
return True
def pre_start() -> bool:
# --- No changes needed in pre_start ---
if not modules.globals.map_faces and not is_image(modules.globals.source_path):
update_status('Select an image for source path.', NAME)
update_status("Select an image for source path.", NAME)
return False
elif not modules.globals.map_faces and not get_one_face(cv2.imread(modules.globals.source_path)):
update_status('No face in source path detected.', NAME)
elif not modules.globals.map_faces and not get_one_face(
cv2.imread(modules.globals.source_path)
):
update_status("No face in source path detected.", NAME)
return False
if not is_image(modules.globals.target_path) and not is_video(modules.globals.target_path):
update_status('Select an image or video for target path.', NAME)
if not is_image(modules.globals.target_path) and not is_video(
modules.globals.target_path
):
update_status("Select an image or video for target path.", NAME)
return False
return True
@ -48,78 +60,67 @@ def get_face_swapper() -> Any:
with THREAD_LOCK:
if FACE_SWAPPER is None:
# --- MODIFICATION START ---
# Define paths for both FP32 and FP16 models
model_dir = resolve_relative_path('../models')
model_path_fp32 = os.path.join(model_dir, 'inswapper_128.onnx')
model_path_fp16 = os.path.join(model_dir, 'inswapper_128_fp16.onnx')
chosen_model_path = None
# Prioritize FP32 model
if os.path.exists(model_path_fp32):
chosen_model_path = model_path_fp32
update_status(f"Loading FP32 model: {os.path.basename(chosen_model_path)}", NAME)
# Fallback to FP16 model
elif os.path.exists(model_path_fp16):
chosen_model_path = model_path_fp16
update_status(f"FP32 model not found. Loading FP16 model: {os.path.basename(chosen_model_path)}", NAME)
# Error if neither model is found
else:
error_message = f"Face Swapper model not found. Please ensure 'inswapper_128.onnx' (recommended) or 'inswapper_128_fp16.onnx' exists in the '{model_dir}' directory."
update_status(error_message, NAME)
raise FileNotFoundError(error_message)
# Load the chosen model
try:
FACE_SWAPPER = insightface.model_zoo.get_model(chosen_model_path, providers=modules.globals.execution_providers)
except Exception as e:
update_status(f"Error loading Face Swapper model {os.path.basename(chosen_model_path)}: {e}", NAME)
# Optionally, re-raise the exception or handle it more gracefully
raise e
# --- MODIFICATION END ---
model_path = os.path.join(models_dir, "inswapper_128_fp16.onnx")
FACE_SWAPPER = insightface.model_zoo.get_model(
model_path, providers=modules.globals.execution_providers
)
return FACE_SWAPPER
def swap_face(source_face: Face, target_face: Face, temp_frame: Frame) -> Frame:
# --- No changes needed in swap_face ---
swapper = get_face_swapper()
if swapper is None:
# Handle case where model failed to load
update_status("Face swapper model not loaded, skipping swap.", NAME)
return temp_frame
return swapper.get(temp_frame, target_face, source_face, paste_back=True)
face_swapper = get_face_swapper()
# Apply the face swap
swapped_frame = face_swapper.get(
temp_frame, target_face, source_face, paste_back=True
)
if modules.globals.mouth_mask:
# Create a mask for the target face
face_mask = create_face_mask(target_face, temp_frame)
# Create the mouth mask
mouth_mask, mouth_cutout, mouth_box, lower_lip_polygon = (
create_lower_mouth_mask(target_face, temp_frame)
)
# Apply the mouth area
swapped_frame = apply_mouth_area(
swapped_frame, mouth_cutout, mouth_box, face_mask, lower_lip_polygon
)
if modules.globals.show_mouth_mask_box:
mouth_mask_data = (mouth_mask, mouth_cutout, mouth_box, lower_lip_polygon)
swapped_frame = draw_mouth_mask_visualization(
swapped_frame, target_face, mouth_mask_data
)
return swapped_frame
def process_frame(source_face: Face, temp_frame: Frame) -> Frame:
# --- No changes needed in process_frame ---
# Ensure the frame is in RGB format if color correction is enabled
# Note: InsightFace swapper often expects BGR by default. Double-check if color issues appear.
# If color correction is needed *before* swapping and insightface needs BGR:
# original_was_bgr = True # Assume input is BGR
# if modules.globals.color_correction:
# temp_frame = cv2.cvtColor(temp_frame, cv2.COLOR_BGR2RGB)
# original_was_bgr = False # Now it's RGB
if modules.globals.color_correction:
temp_frame = cv2.cvtColor(temp_frame, cv2.COLOR_BGR2RGB)
if modules.globals.many_faces:
many_faces = get_many_faces(temp_frame)
if many_faces:
for target_face in many_faces:
temp_frame = swap_face(source_face, target_face, temp_frame)
if source_face and target_face:
temp_frame = swap_face(source_face, target_face, temp_frame)
else:
print("Face detection failed for target/source.")
else:
target_face = get_one_face(temp_frame)
if target_face:
if target_face and source_face:
temp_frame = swap_face(source_face, target_face, temp_frame)
# Convert back if necessary (example, might not be needed depending on workflow)
# if modules.globals.color_correction and not original_was_bgr:
# temp_frame = cv2.cvtColor(temp_frame, cv2.COLOR_RGB2BGR)
else:
logging.error("Face detection failed for target or source.")
return temp_frame
def process_frame_v2(temp_frame: Frame, temp_frame_path: str = "") -> Frame:
# --- No changes needed in process_frame_v2 ---
# (Assuming swap_face handles the potential None return from get_face_swapper)
if is_image(modules.globals.target_path):
if modules.globals.many_faces:
source_face = default_source_face()
@ -141,7 +142,7 @@ def process_frame_v2(temp_frame: Frame, temp_frame_path: str = "") -> Frame:
target_frame = [f for f in map['target_faces_in_frame'] if f['location'] == temp_frame_path]
for frame in target_frame:
for target_face in frame['faces']:
for target_face in frame["faces"]:
temp_frame = swap_face(source_face, target_face, temp_frame)
elif not modules.globals.many_faces:
@ -151,9 +152,10 @@ def process_frame_v2(temp_frame: Frame, temp_frame_path: str = "") -> Frame:
source_face = map['source']['face']
for frame in target_frame:
for target_face in frame['faces']:
for target_face in frame["faces"]:
temp_frame = swap_face(source_face, target_face, temp_frame)
else: # Fallback for neither image nor video (e.g., live feed?)
else:
detected_faces = get_many_faces(temp_frame)
if modules.globals.many_faces:
if detected_faces:
@ -162,96 +164,89 @@ def process_frame_v2(temp_frame: Frame, temp_frame_path: str = "") -> Frame:
temp_frame = swap_face(source_face, target_face, temp_frame)
elif not modules.globals.many_faces:
if detected_faces and hasattr(modules.globals, 'simple_map') and modules.globals.simple_map: # Check simple_map exists
if len(detected_faces) <= len(modules.globals.simple_map['target_embeddings']):
if detected_faces:
if len(detected_faces) <= len(
modules.globals.simple_map["target_embeddings"]
):
for detected_face in detected_faces:
closest_centroid_index, _ = find_closest_centroid(modules.globals.simple_map['target_embeddings'], detected_face.normed_embedding)
temp_frame = swap_face(modules.globals.simple_map['source_faces'][closest_centroid_index], detected_face, temp_frame)
closest_centroid_index, _ = find_closest_centroid(
modules.globals.simple_map["target_embeddings"],
detected_face.normed_embedding,
)
temp_frame = swap_face(
modules.globals.simple_map["source_faces"][
closest_centroid_index
],
detected_face,
temp_frame,
)
else:
detected_faces_centroids = [face.normed_embedding for face in detected_faces]
detected_faces_centroids = []
for face in detected_faces:
detected_faces_centroids.append(face.normed_embedding)
i = 0
for target_embedding in modules.globals.simple_map['target_embeddings']:
closest_centroid_index, _ = find_closest_centroid(detected_faces_centroids, target_embedding)
# Ensure index is valid before accessing detected_faces
if closest_centroid_index < len(detected_faces):
temp_frame = swap_face(modules.globals.simple_map['source_faces'][i], detected_faces[closest_centroid_index], temp_frame)
for target_embedding in modules.globals.simple_map[
"target_embeddings"
]:
closest_centroid_index, _ = find_closest_centroid(
detected_faces_centroids, target_embedding
)
temp_frame = swap_face(
modules.globals.simple_map["source_faces"][i],
detected_faces[closest_centroid_index],
temp_frame,
)
i += 1
return temp_frame
def process_frames(source_path: str, temp_frame_paths: List[str], progress: Any = None) -> None:
# --- No changes needed in process_frames ---
# Note: Ensure get_one_face is called only once if possible for efficiency if !map_faces
source_face = None
def process_frames(
source_path: str, temp_frame_paths: List[str], progress: Any = None
) -> None:
if not modules.globals.map_faces:
source_img = cv2.imread(source_path)
if source_img is not None:
source_face = get_one_face(source_img)
if source_face is None:
update_status(f"Could not find face in source image: {source_path}, skipping swap.", NAME)
# If no source face, maybe skip processing? Or handle differently.
# For now, it will proceed but swap_face might fail later.
for temp_frame_path in temp_frame_paths:
temp_frame = cv2.imread(temp_frame_path)
if temp_frame is None:
update_status(f"Warning: Could not read frame {temp_frame_path}", NAME)
if progress: progress.update(1) # Still update progress even if frame fails
continue # Skip to next frame
try:
if not modules.globals.map_faces:
if source_face: # Only process if source face was found
result = process_frame(source_face, temp_frame)
else:
result = temp_frame # No source face, return original frame
else:
result = process_frame_v2(temp_frame, temp_frame_path)
cv2.imwrite(temp_frame_path, result)
except Exception as exception:
update_status(f"Error processing frame {os.path.basename(temp_frame_path)}: {exception}", NAME)
# Decide whether to 'pass' (continue processing other frames) or raise
pass # Continue processing other frames
finally:
source_face = get_one_face(cv2.imread(source_path))
for temp_frame_path in temp_frame_paths:
temp_frame = cv2.imread(temp_frame_path)
try:
result = process_frame(source_face, temp_frame)
cv2.imwrite(temp_frame_path, result)
except Exception as exception:
print(exception)
pass
if progress:
progress.update(1)
else:
for temp_frame_path in temp_frame_paths:
temp_frame = cv2.imread(temp_frame_path)
try:
result = process_frame_v2(temp_frame, temp_frame_path)
cv2.imwrite(temp_frame_path, result)
except Exception as exception:
print(exception)
pass
if progress:
progress.update(1)
def process_image(source_path: str, target_path: str, output_path: str) -> None:
# --- No changes needed in process_image ---
# Note: Added checks for successful image reads and face detection
target_frame = cv2.imread(target_path) # Read original target for processing
if target_frame is None:
update_status(f"Error: Could not read target image: {target_path}", NAME)
return
if not modules.globals.map_faces:
source_img = cv2.imread(source_path)
if source_img is None:
update_status(f"Error: Could not read source image: {source_path}", NAME)
return
source_face = get_one_face(source_img)
if source_face is None:
update_status(f"Error: No face found in source image: {source_path}", NAME)
return
source_face = get_one_face(cv2.imread(source_path))
target_frame = cv2.imread(target_path)
result = process_frame(source_face, target_frame)
cv2.imwrite(output_path, result)
else:
if modules.globals.many_faces:
update_status('Many faces enabled. Using first source image (if applicable in v2). Processing...', NAME)
# For process_frame_v2 on single image, it reads the 'output_path' which should be a copy
# Let's process the 'target_frame' we read instead.
result = process_frame_v2(target_frame) # Process the frame directly
# Write the final result to the output path
success = cv2.imwrite(output_path, result)
if not success:
update_status(f"Error: Failed to write output image to: {output_path}", NAME)
update_status(
"Many faces enabled. Using first source image. Progressing...", NAME
)
target_frame = cv2.imread(output_path)
result = process_frame_v2(target_frame)
cv2.imwrite(output_path, result)
def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
# --- No changes needed in process_video ---
if modules.globals.map_faces and modules.globals.many_faces:
update_status('Many faces enabled. Using first source image (if applicable in v2). Processing...', NAME)
# The core processing logic is delegated, which is good.


@ -429,7 +429,7 @@ def create_source_target_popup(
POPUP.destroy()
select_output_path(start)
else:
update_pop_status("Atleast 1 source with target is required!")
update_pop_status("At least 1 source with target is required!")
scrollable_frame = ctk.CTkScrollableFrame(
POPUP, width=POPUP_SCROLL_WIDTH, height=POPUP_SCROLL_HEIGHT
@ -489,7 +489,7 @@ def update_popup_source(
global source_label_dict
source_path = ctk.filedialog.askopenfilename(
title=_("select an source image"),
title=_("select a source image"),
initialdir=RECENT_DIRECTORY_SOURCE,
filetypes=[img_ft],
)
@ -584,7 +584,7 @@ def select_source_path() -> None:
PREVIEW.withdraw()
source_path = ctk.filedialog.askopenfilename(
title=_("select an source image"),
title=_("select a source image"),
initialdir=RECENT_DIRECTORY_SOURCE,
filetypes=[img_ft],
)
@ -627,7 +627,7 @@ def select_target_path() -> None:
PREVIEW.withdraw()
target_path = ctk.filedialog.askopenfilename(
title=_("select an target image or video"),
title=_("select a target image or video"),
initialdir=RECENT_DIRECTORY_TARGET,
filetypes=[img_ft, vid_ft],
)
@ -1108,7 +1108,7 @@ def update_webcam_source(
global source_label_dict_live
source_path = ctk.filedialog.askopenfilename(
title=_("select an source image"),
title=_("select a source image"),
initialdir=RECENT_DIRECTORY_SOURCE,
filetypes=[img_ft],
)
@ -1160,7 +1160,7 @@ def update_webcam_target(
global target_label_dict_live
target_path = ctk.filedialog.askopenfilename(
title=_("select an target image"),
title=_("select a target image"),
initialdir=RECENT_DIRECTORY_SOURCE,
filetypes=[img_ft],
)


@ -1,23 +1,21 @@
--extra-index-url https://download.pytorch.org/whl/cu118
numpy>=1.23.5,<2
typing-extensions>=4.8.0
opencv-python==4.11.0.86
onnx==1.17.0
cv2_enumerate_cameras==1.1.18.3
opencv-python==4.10.0.84
cv2_enumerate_cameras==1.1.15
onnx==1.16.0
insightface==0.7.3
psutil==5.9.8
tk==0.1.0
customtkinter==5.2.2
pillow==11.1.0
torch; sys_platform != 'darwin' --index-url https://download.pytorch.org/whl/cu126
torch; sys_platform == 'darwin' --index-url https://download.pytorch.org/whl/cu126
torchvision; sys_platform != 'darwin' --index-url https://download.pytorch.org/whl/cu126
torchvision; sys_platform == 'darwin' --index-url https://download.pytorch.org/whl/cu126
torch==2.5.1+cu118; sys_platform != 'darwin'
torch==2.5.1; sys_platform == 'darwin'
torchvision==0.20.1; sys_platform != 'darwin'
torchvision==0.20.1; sys_platform == 'darwin'
onnxruntime-silicon==1.16.3; sys_platform == 'darwin' and platform_machine == 'arm64'
onnxruntime-gpu==1.21; sys_platform != 'darwin'
onnxruntime-gpu==1.17; sys_platform != 'darwin'
tensorflow; sys_platform != 'darwin'
opennsfw2==0.10.2
protobuf==4.23.2
tqdm==4.66.4
gfpgan==1.3.8
tkinterdnd2==0.4.2
pygrabber==0.2