From ff80083126d4172aab46e33648a7a7e35e26f093 Mon Sep 17 00:00:00 2001
From: andzejsp
Date: Sat, 1 Jul 2023 14:42:39 +0300
Subject: [PATCH] added gpu model name

---
 app.py                  | 4 +++-
 src/clip                | 1 +
 src/taming-transformers | 1 +
 3 files changed, 5 insertions(+), 1 deletion(-)
 create mode 160000 src/clip
 create mode 160000 src/taming-transformers

diff --git a/app.py b/app.py
index e00cb854..47eb8073 100644
--- a/app.py
+++ b/app.py
@@ -563,7 +563,7 @@ class LoLLMsWebUI(LoLLMsAPPI):
 
     def vram_usage(self) -> Optional[dict]:
         try:
-            output = subprocess.check_output(['nvidia-smi', '--query-gpu=memory.total,memory.used', '--format=csv,nounits,noheader'])
+            output = subprocess.check_output(['nvidia-smi', '--query-gpu=memory.total,memory.used,gpu_name', '--format=csv,nounits,noheader'])
             lines = output.decode().strip().split('\n')
             vram_info = [line.split(',') for line in lines]
         except (subprocess.CalledProcessError, FileNotFoundError):
@@ -579,10 +579,12 @@ class LoLLMsWebUI(LoLLMsAPPI):
             for i, gpu in enumerate(vram_info):
                 ram_usage[f"gpu_{i}_total_vram"] = int(gpu[0])*1024*1024
                 ram_usage[f"gpu_{i}_used_vram"] = int(gpu[1])*1024*1024
+                ram_usage[f"gpu_{i}_model"] = gpu[2].strip()
         else:
             # Set all VRAM-related entries to None
             ram_usage["gpu_0_total_vram"] = None
             ram_usage["gpu_0_used_vram"] = None
+            ram_usage["gpu_0_model"] = None
 
         return jsonify(ram_usage)
 
diff --git a/src/clip b/src/clip
new file mode 160000
index 00000000..a9b1bf59
--- /dev/null
+++ b/src/clip
@@ -0,0 +1 @@
+Subproject commit a9b1bf5920416aaeaec965c25dd9e8f98c864f16
diff --git a/src/taming-transformers b/src/taming-transformers
new file mode 160000
index 00000000..3ba01b24
--- /dev/null
+++ b/src/taming-transformers
@@ -0,0 +1 @@
+Subproject commit 3ba01b241669f5ade541ce990f7650a3b8f65318
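
For reference, a minimal standalone sketch of the patched vram_usage logic, runnable outside the Flask app. The `if vram_info is not None:` guard and the bare `ram_usage = {}` initialization are assumptions inferred from the hunk's `else:` branch (the context lines above the for loop are not shown in the diff), and the plain dict return stands in for the real method's jsonify; everything else mirrors the patch.

import subprocess
from typing import Optional

def vram_usage() -> Optional[dict]:
    """Report total/used VRAM and the model name for each NVIDIA GPU."""
    try:
        # With --format=csv,nounits,noheader each output line looks like
        # "24576, 1234, NVIDIA GeForce RTX 3090" (memory values in MiB).
        output = subprocess.check_output(
            ['nvidia-smi',
             '--query-gpu=memory.total,memory.used,gpu_name',
             '--format=csv,nounits,noheader'])
        lines = output.decode().strip().split('\n')
        vram_info = [line.split(',') for line in lines]
    except (subprocess.CalledProcessError, FileNotFoundError):
        # nvidia-smi missing or failed: no usable GPU info
        vram_info = None

    ram_usage = {}  # assumption: the real method seeds this with more keys
    if vram_info is not None:
        for i, gpu in enumerate(vram_info):
            # nounits reports MiB; convert to bytes
            ram_usage[f"gpu_{i}_total_vram"] = int(gpu[0]) * 1024 * 1024
            ram_usage[f"gpu_{i}_used_vram"] = int(gpu[1]) * 1024 * 1024
            # strip() drops the space left by splitting the ", " separator
            ram_usage[f"gpu_{i}_model"] = gpu[2].strip()
    else:
        # Mirror the endpoint's None placeholders when no GPU is found
        ram_usage["gpu_0_total_vram"] = None
        ram_usage["gpu_0_used_vram"] = None
        ram_usage["gpu_0_model"] = None
    return ram_usage

if __name__ == '__main__':
    print(vram_usage())

One caveat worth noting: splitting on ',' assumes the reported GPU name never contains a comma, which holds for current NVIDIA product names.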