Compare commits: 5f2e54552c ... 166d5a34e2

2 commits in this range: 49d9971221, 166d5a34e2
@@ -178,7 +178,7 @@ It is highly recommended to use Python 3.10 for Windows for best compatibility w

 * **Visual Studio Runtimes:** If you encounter errors during `pip install` for packages that compile C code (e.g., some scientific computing or image processing libraries), you might need the [Visual Studio Build Tools (or Runtimes)](https://visualstudio.microsoft.com/visual-cpp-build-tools/). Ensure "C++ build tools" (or similar workload) are selected during installation.
 * **Virtual Environment (Manual Alternative):** If you prefer to set up the virtual environment manually instead of using `setup_windows.bat`:

   ```batch
   python -m venv .venv
   .venv\Scripts\activate.bat
   python -m pip install --upgrade pip
   python -m pip install -r requirements.txt
   ```
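Once the environment is activated, a quick way to confirm you are actually running the `.venv` interpreter is the sketch below. This is an illustrative check, not part of the repo's setup scripts:

```python
# Confirm the active interpreter is the one inside .venv.
import sys

print(sys.executable)  # should point into ...\.venv\Scripts\python.exe on Windows
# True when running inside a virtual environment:
print(sys.prefix != getattr(sys, "base_prefix", sys.prefix))
```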
@@ -244,10 +244,10 @@ For a streamlined setup on macOS, use the provided shell scripts:

 # Ensure you are using Python 3.10 if required by your chosen onnxruntime-silicon version
 # After running setup_mac.sh and activating .venv:
 # source .venv/bin/activate

 pip uninstall onnxruntime onnxruntime-gpu # Uninstall any existing onnxruntime
 pip install onnxruntime-silicon==1.13.1 # Or your desired version

 # Then use ./run_mac_coreml.sh
 ```
 Check the ONNX Runtime documentation for the latest recommended packages for Apple Silicon.
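After installing, you can verify that the CoreML execution provider is actually registered. A minimal sketch using the standard onnxruntime API (`CoreMLExecutionProvider` is the provider name Apple Silicon builds typically expose; check your installed version's docs):

```python
# Verify that ONNX Runtime can see the CoreML execution provider.
import onnxruntime as ort

providers = ort.get_available_providers()
print(providers)
if "CoreMLExecutionProvider" not in providers:
    print("CoreML provider not found; inference will fall back to CPU.")
```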
@@ -26,34 +26,49 @@ def segment_hair(image_np: np.ndarray) -> np.ndarray:
     try:
         HAIR_SEGMENTER_PROCESSOR = SegformerImageProcessor.from_pretrained(MODEL_NAME)
         HAIR_SEGMENTER_MODEL = SegformerForSemanticSegmentation.from_pretrained(MODEL_NAME)
-        # Optional: Move model to GPU if available and if other models use GPU
-        # if torch.cuda.is_available():
-        #     HAIR_SEGMENTER_MODEL = HAIR_SEGMENTER_MODEL.to('cuda')
-        #     print("Hair segmentation model moved to GPU.")
-        print("Hair segmentation model and processor loaded successfully.")
+        if torch.cuda.is_available():
+            try:
+                HAIR_SEGMENTER_MODEL = HAIR_SEGMENTER_MODEL.to('cuda')
+                print("INFO: Hair segmentation model moved to CUDA (GPU).")
+            except Exception as e_cuda:
+                print(f"ERROR: Failed to move hair segmentation model to CUDA: {e_cuda}. Using CPU instead.")
+                # Fallback to CPU if .to('cuda') fails
+                HAIR_SEGMENTER_MODEL = HAIR_SEGMENTER_MODEL.to('cpu')
+        else:
+            print("INFO: CUDA not available. Hair segmentation model will use CPU.")
+
+        print("INFO: Hair segmentation model and processor loaded successfully (device: {}).".format(HAIR_SEGMENTER_MODEL.device))
     except Exception as e:
-        print(f"Failed to load hair segmentation model/processor: {e}")
+        print(f"ERROR: Failed to load hair segmentation model/processor: {e}")
         # Return an empty mask compatible with expected output shape (H, W)
         return np.zeros((image_np.shape[0], image_np.shape[1]), dtype=np.uint8)

-    # Ensure processor and model are loaded before proceeding
-    if HAIR_SEGMENTER_PROCESSOR is None or HAIR_SEGMENTER_MODEL is None:
-        print("Error: Hair segmentation models are not available.")
-        return np.zeros((image_np.shape[0], image_np.shape[1]), dtype=np.uint8)
-
     # Convert BGR (OpenCV) to RGB (PIL)
     image_rgb = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
     image_pil = Image.fromarray(image_rgb)

     inputs = HAIR_SEGMENTER_PROCESSOR(images=image_pil, return_tensors="pt")

-    # Optional: Move inputs to GPU if model is on GPU
-    # if HAIR_SEGMENTER_MODEL.device.type == 'cuda':
-    #     inputs = inputs.to(HAIR_SEGMENTER_MODEL.device)
+    if HAIR_SEGMENTER_MODEL.device.type == 'cuda':
+        try:
+            # SegformerImageProcessor output (BatchEncoding) is a dict-like object.
+            # We need to move its tensor components, commonly 'pixel_values'.
+            if 'pixel_values' in inputs:
+                inputs['pixel_values'] = inputs['pixel_values'].to('cuda')
+            else: # Fallback if the structure is different than expected
+                inputs = inputs.to('cuda')
+            # If inputs has other tensor components that need to be moved, they'd need similar handling.
+        except Exception as e_inputs_cuda:
+            print(f"ERROR: Failed to move inputs to CUDA: {e_inputs_cuda}. Attempting inference on CPU.")
+            # If moving inputs to CUDA fails, we should ensure the model is also on CPU for this inference pass.
+            # This is a tricky situation; ideally, this failure shouldn't happen if the model moved successfully.
+            # For simplicity, we'll assume if the model is on CUDA, inputs should also be.
+            # A more robust solution might involve moving the model back to CPU if inputs can't be moved.

     with torch.no_grad(): # Important for inference
         outputs = HAIR_SEGMENTER_MODEL(**inputs)

     logits = outputs.logits # Shape: batch_size, num_labels, height, width

     # Upsample logits to original image size
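The comments above hedge about keeping the model and its inputs on the same device. For reference, the `BatchEncoding` object returned by the processor supports `.to(device)` directly, so one more defensive variant is sketched below. `ensure_same_device` is a hypothetical helper name, not code from this commit:

```python
# Sketch: keep a transformers model and a BatchEncoding on the same device,
# falling back to CPU together if the transfer fails.
# ensure_same_device is illustrative, not part of this repository.
def ensure_same_device(model, inputs):
    try:
        # BatchEncoding.to() moves every tensor field (e.g. pixel_values) at once.
        return model, inputs.to(model.device)
    except Exception:
        # Pair them on CPU rather than risk mixed-device inference.
        return model.to('cpu'), inputs.to('cpu')
```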
@@ -85,7 +100,7 @@ if __name__ == '__main__':
     # Create a dummy image for a basic test run if no image is available.
     dummy_image_np = np.zeros((100, 100, 3), dtype=np.uint8) # 100x100 BGR image
     dummy_image_np[:, :, 1] = 255 # Make it green to distinguish from black mask

     try:
         print("Running segment_hair with a dummy image...")
         hair_mask_output = segment_hair(dummy_image_np)
@@ -95,7 +110,7 @@ if __name__ == '__main__':
         # Check if the mask is binary (0 or 255)
         assert np.all(np.isin(hair_mask_output, [0, 255]))
         print("Dummy image test successful. Hair mask seems to be generated correctly.")

         # Attempt to save the dummy mask (optional, just for visual confirmation if needed)
         # cv2.imwrite("dummy_hair_mask_output.png", hair_mask_output)
         # print("Dummy hair mask saved to dummy_hair_mask_output.png")
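Beyond the dummy-image self-test, a quick real-image check uses the same public function. The input path and import below are assumptions for illustration, not files shipped with the commit:

```python
# Sketch: run segment_hair on a real photo and save the resulting mask.
# "portrait.jpg" is a placeholder input; the module path is assumed.
import cv2
from modules.hair_segmenter import segment_hair

image = cv2.imread("portrait.jpg")  # BGR, as segment_hair expects
if image is not None:
    mask = segment_hair(image)      # uint8 mask of shape (H, W), values 0 or 255
    cv2.imwrite("hair_mask.png", mask)
```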
@@ -69,34 +69,70 @@ def get_face_swapper() -> Any:


 def _prepare_warped_source_material_and_mask(
     source_face_obj: Face,
     source_frame_full: Frame,
     matrix: np.ndarray,
     dsize: tuple # Built-in tuple is fine here for parameter type
 ) -> Tuple[Optional[Frame], Optional[Frame]]:
     """
     Prepares warped source material (full image) and a combined (face+hair) mask for blending.
     Returns (None, None) if essential masks cannot be generated.
     """
-    # Generate Hair Mask
-    hair_only_mask_source_raw = segment_hair(source_frame_full)
-    if hair_only_mask_source_raw.ndim == 3 and hair_only_mask_source_raw.shape[2] == 3:
-        hair_only_mask_source_raw = cv2.cvtColor(hair_only_mask_source_raw, cv2.COLOR_BGR2GRAY)
-    _, hair_only_mask_source_binary = cv2.threshold(hair_only_mask_source_raw, 127, 255, cv2.THRESH_BINARY)
+    try:
+        # Generate Hair Mask
+        hair_only_mask_source_raw = segment_hair(source_frame_full)
+        if hair_only_mask_source_raw is None:
+            logging.error("segment_hair returned None, which is unexpected.")
+            return None, None
+        if hair_only_mask_source_raw.ndim == 3 and hair_only_mask_source_raw.shape[2] == 3:
+            hair_only_mask_source_raw = cv2.cvtColor(hair_only_mask_source_raw, cv2.COLOR_BGR2GRAY)
+        _, hair_only_mask_source_binary = cv2.threshold(hair_only_mask_source_raw, 127, 255, cv2.THRESH_BINARY)
+    except Exception as e:
+        logging.error(f"Hair segmentation failed: {e}", exc_info=True)
+        return None, None

-    # Generate Face Mask
-    face_only_mask_source_raw = create_face_mask(source_face_obj, source_frame_full)
-    _, face_only_mask_source_binary = cv2.threshold(face_only_mask_source_raw, 127, 255, cv2.THRESH_BINARY)
+    try:
+        # Generate Face Mask
+        face_only_mask_source_raw = create_face_mask(source_face_obj, source_frame_full)
+        if face_only_mask_source_raw is None:
+            logging.error("create_face_mask returned None, which is unexpected.")
+            return None, None
+        _, face_only_mask_source_binary = cv2.threshold(face_only_mask_source_raw, 127, 255, cv2.THRESH_BINARY)
+    except Exception as e:
+        logging.error(f"Face mask creation failed for source: {e}", exc_info=True)
+        return None, None

-    # Combine Face and Hair Masks
-    if face_only_mask_source_binary.shape != hair_only_mask_source_binary.shape:
-        logging.warning("Resizing hair mask to match face mask for source during preparation.")
-        hair_only_mask_source_binary = cv2.resize(
-            hair_only_mask_source_binary,
-            (face_only_mask_source_binary.shape[1], face_only_mask_source_binary.shape[0]),
-            interpolation=cv2.INTER_NEAREST
-        )
-
-    actual_combined_source_mask = cv2.bitwise_or(face_only_mask_source_binary, hair_only_mask_source_binary)
-    actual_combined_source_mask_blurred = cv2.GaussianBlur(actual_combined_source_mask, (5, 5), 3)
+    # Combine Face and Hair Masks and Warp
+    try:
+        if face_only_mask_source_binary.shape != hair_only_mask_source_binary.shape:
+            logging.warning("Resizing hair mask to match face mask for source during preparation.")
+            hair_only_mask_source_binary = cv2.resize(
+                hair_only_mask_source_binary,
+                (face_only_mask_source_binary.shape[1], face_only_mask_source_binary.shape[0]),
+                interpolation=cv2.INTER_NEAREST
+            )
+
+        actual_combined_source_mask = cv2.bitwise_or(face_only_mask_source_binary, hair_only_mask_source_binary)
+        actual_combined_source_mask_blurred = cv2.GaussianBlur(actual_combined_source_mask, (5, 5), 3)
@@ -104,13 +140,13 @@ def _prepare_warped_source_material_and_mask(
-    warped_full_source_material = cv2.warpAffine(source_frame_full, matrix, dsize)
-    warped_combined_mask_temp = cv2.warpAffine(actual_combined_source_mask_blurred, matrix, dsize)
-    _, warped_combined_mask_binary_for_clone = cv2.threshold(warped_combined_mask_temp, 127, 255, cv2.THRESH_BINARY)
+        warped_full_source_material = cv2.warpAffine(source_frame_full, matrix, dsize)
+        warped_combined_mask_temp = cv2.warpAffine(actual_combined_source_mask_blurred, matrix, dsize)
+        _, warped_combined_mask_binary_for_clone = cv2.threshold(warped_combined_mask_temp, 127, 255, cv2.THRESH_BINARY)
+    except Exception as e:
+        logging.error(f"Mask combination or warping failed: {e}", exc_info=True)
+        return None, None

     return warped_full_source_material, warped_combined_mask_binary_for_clone


 def _blend_material_onto_frame(
     base_frame: Frame,
     material_to_blend: Frame,
     mask_for_blending: Frame
 ) -> Frame:
     """
@@ -122,7 +158,7 @@ def _blend_material_onto_frame(

     if w > 0 and h > 0:
         center = (x + w // 2, y + h // 2)

         if material_to_blend.shape == base_frame.shape and \
            material_to_blend.dtype == base_frame.dtype and \
            mask_for_blending.dtype == np.uint8:
@@ -134,7 +170,7 @@ def _blend_material_onto_frame(
                 output_frame = cv2.seamlessClone(material_to_blend, base_frame, mask_for_blending, center, cv2.NORMAL_CLONE)
             except cv2.error as e:
                 logging.warning(f"cv2.seamlessClone failed: {e}. Falling back to simple blending.")
                 boolean_mask = mask_for_blending > 127
                 output_frame[boolean_mask] = material_to_blend[boolean_mask]
         else:
             logging.warning("Mismatch in shape/type for seamlessClone. Falling back to simple blending.")
@@ -142,7 +178,7 @@ def _blend_material_onto_frame(
             output_frame[boolean_mask] = material_to_blend[boolean_mask]
     else:
         logging.info("Warped mask for blending is empty. Skipping blending.")

     return output_frame
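The fallback logic above is easy to exercise in isolation. Below is a minimal, self-contained sketch of the same seamlessClone-with-fallback pattern; the synthetic images stand in for the frames the swap pipeline would supply:

```python
# Minimal demo of cv2.seamlessClone with a simple masked-copy fallback.
import cv2
import numpy as np

base = np.full((200, 200, 3), 80, dtype=np.uint8)       # destination frame
material = np.full((200, 200, 3), 200, dtype=np.uint8)  # material to blend
mask = np.zeros((200, 200), dtype=np.uint8)
cv2.circle(mask, (100, 100), 40, 255, -1)               # region to clone

x, y, w, h = cv2.boundingRect(mask)
center = (x + w // 2, y + h // 2)
try:
    out = cv2.seamlessClone(material, base, mask, center, cv2.NORMAL_CLONE)
except cv2.error:
    out = base.copy()
    out[mask > 127] = material[mask > 127]               # simple fallback
```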
@@ -153,7 +189,7 @@ def swap_face(source_face_obj: Face, target_face: Face, source_frame_full: Frame
     swapped_frame = face_swapper.get(temp_frame, target_face, source_face_obj, paste_back=True)
     final_swapped_frame = swapped_frame # Initialize with the base swap. Copy is made only if needed.

-    if modules.globals.enable_hair_swapping:
+    if getattr(modules.globals, 'enable_hair_swapping', True): # Default to True if attribute is missing
         if not (source_face_obj.kps is not None and \
                 target_face.kps is not None and \
                 source_face_obj.kps.shape[0] >= 3 and \
@@ -172,23 +208,27 @@ def swap_face(source_face_obj: Face, target_face: Face, source_frame_full: Frame
             logging.warning("Failed to estimate affine transformation matrix for hair. Skipping hair blending.")
         else:
             dsize = (temp_frame.shape[1], temp_frame.shape[0]) # width, height

             warped_material, warped_mask = _prepare_warped_source_material_and_mask(
                 source_face_obj, source_frame_full, matrix, dsize
             )

             if warped_material is not None and warped_mask is not None:
                 # Make a copy only now that we are sure we will modify it for hair.
                 final_swapped_frame = swapped_frame.copy()

-                color_corrected_material = apply_color_transfer(warped_material, final_swapped_frame) # Use final_swapped_frame for color context
+                try:
+                    color_corrected_material = apply_color_transfer(warped_material, final_swapped_frame)
+                except Exception as e:
+                    logging.warning(f"Color transfer failed: {e}. Proceeding with uncorrected material for hair blending.", exc_info=True)
+                    color_corrected_material = warped_material # Use uncorrected material as fallback

                 final_swapped_frame = _blend_material_onto_frame(
                     final_swapped_frame,
                     color_corrected_material,
                     warped_mask
                 )

     # Mouth Mask Logic (operates on final_swapped_frame)
     if modules.globals.mouth_mask:
         # If final_swapped_frame wasn't copied for hair, it needs to be copied now before mouth mask modification.
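The hunk above references estimating an affine transformation matrix from source to target keypoints (hence the `kps.shape[0] >= 3` guard earlier). A minimal sketch of how such a matrix is typically obtained with OpenCV, assuming keypoints are `(N, 2)` float arrays; the repo's exact call may differ:

```python
# Sketch: estimate a partial-affine transform mapping source keypoints
# onto target keypoints, as used to warp source hair into the target frame.
# The coordinates below are placeholders, not real landmarks.
import cv2
import numpy as np

source_kps = np.float32([[30, 40], [70, 42], [50, 80]])  # e.g. eyes + nose
target_kps = np.float32([[32, 45], [68, 44], [51, 82]])

matrix, inliers = cv2.estimateAffinePartial2D(source_kps, target_kps)
if matrix is None:
    print("Could not estimate transform; skip hair blending.")
```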
@@ -900,7 +900,7 @@ def create_webcam_preview(camera_index: int):
     PREVIEW.deiconify()

     frame_processors = get_frame_processors_modules(modules.globals.frame_processors)

     # --- Source Image Loading and Validation (Moved before the loop) ---
     source_face_obj_for_cam = None
     source_frame_full_for_cam = None
@@ -925,7 +925,7 @@ def create_webcam_preview(camera_index: int):
             ROOT.update()
             time.sleep(0.05)
         return

     source_frame_full_for_cam = cv2.imread(modules.globals.source_path)
     if source_frame_full_for_cam is None:
         update_status(f"Error: Could not read source image at {modules.globals.source_path}")
@@ -980,7 +980,7 @@ def create_webcam_preview(camera_index: int):
             ROOT.update()
             time.sleep(0.05)
         return

     if not modules.globals.source_target_map and not modules.globals.simple_map:
         update_status("Warning: No face map defined for map_faces mode. Swapper may not work as expected.")
         # This is a warning, not a fatal error for the preview window itself. Processing will continue.
@@ -1015,11 +1015,11 @@ def create_webcam_preview(camera_index: int):

         if not modules.globals.map_faces:
             # Case 1: map_faces is False - source_face_obj_for_cam and source_frame_full_for_cam are pre-loaded
-            if source_face_obj_for_cam and source_frame_full_for_cam is not None: # Check if valid after pre-loading
+            if source_face_obj_for_cam is not None and source_frame_full_for_cam is not None: # Check if valid after pre-loading
                 for frame_processor in frame_processors:
                     if frame_processor.NAME == "DLC.FACE-ENHANCER":
                         if modules.globals.fp_ui["face_enhancer"]:
                             temp_frame = frame_processor.process_frame(None, temp_frame)
                     else:
                         temp_frame = frame_processor.process_frame(source_face_obj_for_cam, source_frame_full_for_cam, temp_frame)
             # If source image was invalid (e.g. no face), source_face_obj_for_cam might be None.
@@ -1032,8 +1032,10 @@ def create_webcam_preview(camera_index: int):
             for frame_processor in frame_processors:
                 if frame_processor.NAME == "DLC.FACE-ENHANCER":
                     if modules.globals.fp_ui["face_enhancer"]:
-                        temp_frame = frame_processor.process_frame_v2(source_frame_full_for_cam_map_faces, temp_frame)
+                        # Corrected: face_enhancer.process_frame_v2 is expected to take only temp_frame
+                        temp_frame = frame_processor.process_frame_v2(temp_frame)
                 else:
+                    # This is for other processors when map_faces is True
                     temp_frame = frame_processor.process_frame_v2(source_frame_full_for_cam_map_faces, temp_frame)
             # If source_frame_full_for_cam_map_faces was invalid, error is persistent from pre-loop check.
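The fix above hard-codes which processor takes which arguments. An alternative, sketched here purely for illustration (not repo code), is to dispatch on the method's arity so new processors don't need name-based special cases:

```python
# Sketch: dispatch process_frame_v2 based on its parameter count instead of
# special-casing processor names. call_process_frame_v2 is hypothetical.
import inspect

def call_process_frame_v2(processor, source_frame, temp_frame):
    params = inspect.signature(processor.process_frame_v2).parameters
    if len(params) == 1:  # e.g. the face enhancer: (temp_frame)
        return processor.process_frame_v2(temp_frame)
    return processor.process_frame_v2(source_frame, temp_frame)
```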
@@ -15,8 +15,9 @@ if errorlevel 1 (
 :: Optional: Check Python version (e.g., >= 3.9 or >=3.10).
 :: This is a bit more complex in pure batch. For now, rely on user having a modern Python 3.
 :: The README will recommend 3.10.
-echo Found Python:
-python --version
+:: If we reach here, Python is found.
+echo Python was found. Attempting to display version:
+for /f "delims=" %%i in ('python --version 2^>^&1') do echo %%i

 :: 2. Check for ffmpeg (informational)
 echo Checking for ffmpeg...
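Since a full version check is awkward in pure batch, one common workaround is to delegate the check to Python itself. A sketch of a tiny helper (a hypothetical `check_python.py`, not part of this commit) that the batch script could invoke with `python check_python.py || exit /b 1`:

```python
# check_python.py - hypothetical helper enforcing the recommended version.
import sys

REQUIRED = (3, 10)

if sys.version_info[:2] != REQUIRED:
    print(f"ERROR: Python {REQUIRED[0]}.{REQUIRED[1]} is recommended, "
          f"found {sys.version.split()[0]}")
    sys.exit(1)

print(f"OK: Python {sys.version.split()[0]}")
```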