From bf9caf9945ae81e10a8b2639ab60aa70727e874a Mon Sep 17 00:00:00 2001
From: MYusufY
Date: Fri, 30 Aug 2024 09:31:46 +0300
Subject: [PATCH] Use cupy for GPU count instead of tensorflow

---
 modules/core.py  | 14 +++++++-------
 requirements.txt |  1 +
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/modules/core.py b/modules/core.py
index 9de11ca..21c34eb 100644
--- a/modules/core.py
+++ b/modules/core.py
@@ -3,8 +3,7 @@ import sys
 # single thread doubles cuda performance - needs to be set before torch import
 if any(arg.startswith('--execution-provider') for arg in sys.argv):
     os.environ['OMP_NUM_THREADS'] = '1'
-# reduce tensorflow log level
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
+
 import warnings
 from typing import List
 import platform
@@ -13,7 +12,7 @@ import shutil
 import argparse
 import torch
 import onnxruntime
-import tensorflow
+import cupy as cp
 
 import modules.globals
 import modules.metadata
@@ -131,10 +130,11 @@ def suggest_execution_threads() -> int:
 
 
 def limit_resources() -> None:
-    # prevent tensorflow memory leak
-    gpus = tensorflow.config.experimental.list_physical_devices('GPU')
-    for gpu in gpus:
-        tensorflow.config.experimental.set_memory_growth(gpu, True)
+    gpus = cp.cuda.runtime.getDeviceCount()
+
+    for i in range(gpus):
+        device = cp.cuda.Device(i)
+        device.use()
     # limit memory usage
     if modules.globals.max_memory:
         memory = modules.globals.max_memory * 1024 ** 3
diff --git a/requirements.txt b/requirements.txt
index f65195e..75338d9 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -21,3 +21,4 @@ opennsfw2==0.10.2
 protobuf==4.23.2
 tqdm==4.66.4
 gfpgan==1.3.8
+cupy==1.6.3
\ No newline at end of file