added gpu model name

commit ff80083126
parent d7aad205fa
app.py: 4 changed lines
@@ -563,7 +563,7 @@ class LoLLMsWebUI(LoLLMsAPPI):
 
     def vram_usage(self) -> Optional[dict]:
         try:
-            output = subprocess.check_output(['nvidia-smi', '--query-gpu=memory.total,memory.used', '--format=csv,nounits,noheader'])
+            output = subprocess.check_output(['nvidia-smi', '--query-gpu=memory.total,memory.used,gpu_name', '--format=csv,nounits,noheader'])
             lines = output.decode().strip().split('\n')
             vram_info = [line.split(',') for line in lines]
         except (subprocess.CalledProcessError, FileNotFoundError):
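With gpu_name appended to the query, each CSV row that nvidia-smi emits now carries three fields instead of two. As a hedged illustration (the exact numbers and model string depend on the installed GPU and driver), output.decode() on a single-GPU machine might look like:

24576, 3182, NVIDIA GeForce RTX 4090

which is why the second hunk below can index gpu[2] for the model name.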
@@ -579,10 +579,12 @@ class LoLLMsWebUI(LoLLMsAPPI):
             for i, gpu in enumerate(vram_info):
                 ram_usage[f"gpu_{i}_total_vram"] = int(gpu[0])*1024*1024
                 ram_usage[f"gpu_{i}_used_vram"] = int(gpu[1])*1024*1024
+                ram_usage[f"gpu_{i}_model"] = gpu[2].strip()
         else:
             # Set all VRAM-related entries to None
             ram_usage["gpu_0_total_vram"] = None
             ram_usage["gpu_0_used_vram"] = None
+            ram_usage["gpu_0_model"] = None
 
         return jsonify(ram_usage)
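Pulling the two hunks together, here is a minimal, self-contained sketch of the resulting logic. It is an illustration, not the repo's code: query_gpu_vram is a hypothetical name (the real method lives on LoLLMsWebUI and returns jsonify(ram_usage)), and the sketch assumes nvidia-smi is on PATH.

import subprocess
from typing import Optional

def query_gpu_vram() -> Optional[dict]:
    # Hypothetical standalone version of the logic this commit changes.
    try:
        # With gpu_name appended, each CSV row is:
        # "<total MiB>, <used MiB>, <model name>"
        output = subprocess.check_output(
            ['nvidia-smi',
             '--query-gpu=memory.total,memory.used,gpu_name',
             '--format=csv,nounits,noheader'])
    except (subprocess.CalledProcessError, FileNotFoundError):
        return None  # no NVIDIA driver, or nvidia-smi not on PATH

    vram_info = [line.split(',') for line in output.decode().strip().split('\n')]
    usage = {}
    for i, gpu in enumerate(vram_info):
        usage[f"gpu_{i}_total_vram"] = int(gpu[0]) * 1024 * 1024  # MiB -> bytes
        usage[f"gpu_{i}_used_vram"] = int(gpu[1]) * 1024 * 1024
        usage[f"gpu_{i}_model"] = gpu[2].strip()  # new field from this commit
    return usage

Note the .strip() on gpu[2]: with --format=csv, nvidia-smi separates fields with a comma and a space, so the model name arrives with a leading space that the commit strips.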
src/clip: new submodule (1 changed line)

@@ -0,0 +1 @@
+Subproject commit a9b1bf5920416aaeaec965c25dd9e8f98c864f16
src/taming-transformers: new submodule (1 changed line)

@@ -0,0 +1 @@
+Subproject commit 3ba01b241669f5ade541ce990f7650a3b8f65318