enhanced code execution

Saifeddine ALOUI 2024-01-08 00:22:23 +01:00
parent de8adeb8d4
commit 53dbdeb49e
9 changed files with 229 additions and 207 deletions

app.py (90 changed lines)

@@ -282,7 +282,7 @@ try:
self.add_endpoint("/get_active_model", "get_active_model", self.get_active_model, methods=["GET"])
self.add_endpoint("/add_reference_to_local_model", "add_reference_to_local_model", self.add_reference_to_local_model, methods=["POST"])
self.add_endpoint("/get_model_status", "get_model_status", self.get_model_status, methods=["GET"])
self.add_endpoint("/get_available_models", "get_available_models", self.get_available_models, methods=["GET"])
self.add_endpoint("/post_to_personality", "post_to_personality", self.post_to_personality, methods=["POST"])
self.add_endpoint("/reinstall_personality", "reinstall_personality", self.reinstall_personality, methods=["POST"])
@@ -304,8 +304,6 @@ try:
self.add_endpoint("/uploads/<path:filename>", "serve_uploads", self.serve_uploads, methods=["GET"])
self.add_endpoint("/<path:filename>", "serve_static", self.serve_static, methods=["GET"])
self.add_endpoint("/user_infos/<path:filename>", "serve_user_infos", self.serve_user_infos, methods=["GET"])
@@ -348,20 +346,27 @@ try:
self.add_endpoint("/", "", self.index, methods=["GET"])
self.add_endpoint("/settings/", "", self.index, methods=["GET"])
self.add_endpoint("/playground/", "", self.index, methods=["GET"])
self.add_endpoint("/extensions", "extensions", self.extensions, methods=["GET"])
self.add_endpoint("/training", "training", self.training, methods=["GET"])
self.add_endpoint("/main", "main", self.main, methods=["GET"])
self.add_endpoint("/settings", "settings", self.settings, methods=["GET"])
self.add_endpoint("/help", "help", self.help, methods=["GET"])
self.add_endpoint("/switch_personal_path", "switch_personal_path", self.switch_personal_path, methods=["POST"])
self.add_endpoint("/upload_avatar", "upload_avatar", self.upload_avatar, methods=["POST"])
self.add_endpoint("/edit_message", "edit_message", self.edit_message, methods=["GET"])
self.add_endpoint("/message_rank_up", "message_rank_up", self.message_rank_up, methods=["GET"])
self.add_endpoint("/message_rank_down", "message_rank_down", self.message_rank_down, methods=["GET"])
self.add_endpoint("/delete_message", "delete_message", self.delete_message, methods=["GET"])
self.add_endpoint("/get_config", "get_config", self.get_config, methods=["GET"])
self.add_endpoint("/update_setting", "update_setting", self.update_setting, methods=["POST"])
self.add_endpoint("/apply_settings", "apply_settings", self.apply_settings, methods=["POST"])
self.add_endpoint("/save_settings", "save_settings", self.save_settings, methods=["POST"])
# ----
@@ -389,49 +394,18 @@ try:
self.add_endpoint("/get_config", "get_config", self.get_config, methods=["GET"])
self.add_endpoint(
"/get_available_models", "get_available_models", self.get_available_models, methods=["GET"]
)
self.add_endpoint(
"/extensions", "extensions", self.extensions, methods=["GET"]
)
self.add_endpoint(
"/upgrade_to_gpu", "upgrade_to_gpu", self.upgrade_to_gpu, methods=["GET"]
)
self.add_endpoint(
"/training", "training", self.training, methods=["GET"]
)
self.add_endpoint(
"/main", "main", self.main, methods=["GET"]
)
self.add_endpoint(
"/settings", "settings", self.settings, methods=["GET"]
)
self.add_endpoint(
"/help", "help", self.help, methods=["GET"]
)
self.add_endpoint(
"/update_setting", "update_setting", self.update_setting, methods=["POST"]
)
self.add_endpoint(
"/apply_settings", "apply_settings", self.apply_settings, methods=["POST"]
)
self.add_endpoint(
"/save_settings", "save_settings", self.save_settings, methods=["POST"]
)
self.add_endpoint(
"/get_current_personality", "get_current_personality", self.get_current_personality, methods=["GET"]
@@ -493,22 +467,12 @@ try:
"/install_sd", "install_sd", self.install_sd, methods=["GET"]
)
self.add_endpoint(
"/open_code_folder", "open_code_folder", self.open_code_folder, methods=["POST"]
)
self.add_endpoint(
"/open_code_folder_in_vs_code", "open_code_folder_in_vs_code", self.open_code_folder_in_vs_code, methods=["POST"]
)
self.add_endpoint(
"/open_code_in_vs_code", "open_code_in_vs_code", self.open_code_in_vs_code, methods=["POST"]
)
self.add_endpoint(
"/open_file", "open_file", self.open_file, methods=["GET"]
)
self.add_endpoint("/open_code_folder", "open_code_folder", self.open_code_folder, methods=["POST"])
self.add_endpoint("/open_code_folder_in_vs_code", "open_code_folder_in_vs_code", self.open_code_folder_in_vs_code, methods=["POST"])
self.add_endpoint("/open_code_in_vs_code", "open_code_in_vs_code", self.open_code_in_vs_code, methods=["POST"])
self.add_endpoint("/open_file", "open_file", self.open_file, methods=["GET"])
self.add_endpoint(
"/update_binding_settings", "update_binding_settings", self.update_binding_settings, methods=["GET"]
)
self.add_endpoint("/update_binding_settings", "update_binding_settings", self.update_binding_settings, methods=["GET"])
def update_binding_settings(self):
@@ -1214,22 +1178,6 @@ try:
return jsonify({"status":False,"error":str(ex)})
def upgrade_to_gpu(self):
ASCIIColors.yellow("Received command to upgrade to GPU")
ASCIIColors.info("Installing cuda toolkit")
ASCIIColors.yellow("Removing pytorch")
try:
res = subprocess.check_call(["pip","uninstall","torch", "torchvision", "torchaudio", "-y"])
except :
pass
ASCIIColors.green("PyTorch uninstalled successfully")
reinstall_pytorch_with_cuda()
ASCIIColors.yellow("Installing pytorch with cuda support")
self.config.hardware_mode="nvidia-tensorcores"
return jsonify({'status':res==0})
def ram_usage(self):
"""
Returns the RAM usage in bytes.
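The hunks above consolidate many `self.add_endpoint(...)` registrations into single lines and drop the GPU-upgrade endpoint. The `add_endpoint` helper itself is not part of this diff; a minimal sketch of what such a helper typically looks like, assuming the legacy server wraps a Flask app (the surrounding code uses `jsonify`), is:

```python
# Minimal sketch of an add_endpoint helper (assumption: the legacy server is Flask-based).
# Illustrative only, not the project's actual implementation.
from flask import Flask


class LegacyServer:
    def __init__(self):
        self.app = Flask(__name__)

    def add_endpoint(self, rule, endpoint_name, handler, methods=("GET",)):
        # Flask's add_url_rule binds the URL rule to the handler under a unique endpoint name.
        self.app.add_url_rule(rule, endpoint_name, handler, methods=list(methods))
```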

endpoints/chat_bar.py (new file, 33 lines)

@@ -0,0 +1,33 @@
"""
project: lollms_webui
file: chat_bar.py
author: ParisNeo
description:
This module contains a set of FastAPI routes that provide chat-bar services (such as code execution) for the Lord of Large Language and Multimodal Systems (LoLLMs) Web UI
application. These routes are linked to the lollms_webui chat bar.
"""
from fastapi import APIRouter, Request
from lollms_webui import LOLLMSWebUI
from pydantic import BaseModel
from starlette.responses import StreamingResponse
from lollms.types import MSG_TYPE
from lollms.main_config import BaseConfig
from lollms.utilities import detect_antiprompt, remove_text_from_string, trace_exception
from ascii_colors import ASCIIColors
from api.db import DiscussionsDB
from pathlib import Path
from safe_store.text_vectorizer import TextVectorizer, VectorizationMethod, VisualizationMethod
import tqdm
from fastapi import FastAPI, UploadFile, File
import shutil
import os
import platform
from utilities.execution_engines.python_execution_engine import execute_python
from utilities.execution_engines.latex_execution_engine import execute_latex
from utilities.execution_engines.shell_execution_engine import execute_bash
router = APIRouter()
lollmsElfServer:LOLLMSWebUI = LOLLMSWebUI.get_instance()
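The route definitions that use these imports are not visible in this extract. Purely as an illustration of how the three execution engines could be dispatched from a chat-bar route, assuming the `(code, discussion_id, message_id)` signatures and dict return values shown later in this commit (the route path and field names below are hypothetical):

```python
# Hypothetical dispatch route: not the project's actual endpoint.
@router.post("/execute_chat_code")
async def execute_chat_code(request: Request):
    data = await request.json()
    code = data["code"]
    discussion_id = data.get("discussion_id", "unknown_discussion")
    message_id = data.get("message_id", "unknown_message")
    language = data.get("language", "python")

    engines = {
        "python": execute_python,
        "latex": execute_latex,
        "bash": execute_bash,
    }
    engine = engines.get(language)
    if engine is None:
        return {"status": False, "error": f"Unsupported language: {language}"}
    # Each engine returns a dict such as {"output": ..., "execution_time": ...}.
    return engine(code, discussion_id, message_id)
```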


@@ -21,11 +21,14 @@ from safe_store.text_vectorizer import TextVectorizer, VectorizationMethod, Visu
import tqdm
from fastapi import FastAPI, UploadFile, File
import shutil
import os
import platform
from utilities.execution_engines.python_execution_engine import execute_python
from utilities.execution_engines.latex_execution_engine import execute_latex
from utilities.execution_engines.shell_execution_engine import execute_bash
router = APIRouter()
lollmsElfServer:LOLLMSWebUI = LOLLMSWebUI.get_instance()
@@ -60,3 +63,146 @@ async def execute_code(request: Request):
trace_exception(ex)
lollmsElfServer.error(ex)
return {"status":False,"error":str(ex)}
@router.post("/open_code_folder")
async def open_code_folder(request: Request):
"""
Opens code folder.
:param request: The HTTP request object.
:return: A JSON response with the status of the operation.
"""
try:
data = (await request.json())
discussion_id = data.get("discussion_id","unknown_discussion")
ASCIIColors.info("Opening folder:")
        # Make sure the discussion's output folder exists before opening it.
root_folder = lollmsElfServer.lollms_paths.personal_outputs_path/"discussions"/f"d_{discussion_id}"
root_folder.mkdir(parents=True,exist_ok=True)
if platform.system() == 'Windows':
os.startfile(str(root_folder))
elif platform.system() == 'Linux':
os.system('xdg-open ' + str(root_folder))
elif platform.system() == 'Darwin':
os.system('open ' + str(root_folder))
return {"output": "OK", "execution_time": 0}
except Exception as ex:
trace_exception(ex)
lollmsElfServer.error(ex)
return {"status":False,"error":str(ex)}
@router.post("/open_code_folder_in_vs_code")
async def open_code_folder_in_vs_code(request: Request):
"""
    Opens the code folder in VS Code.
:param request: The HTTP request object.
:return: A JSON response with the status of the operation.
"""
try:
data = (await request.json())
code = data["code"]
discussion_id = data.get("discussion_id","unknown_discussion")
message_id = data.get("message_id","unknown_message")
language = data.get("language","python")
ASCIIColors.info("Opening folder:")
        # Write the submitted code to a file inside the discussion's output folder.
root_folder = lollmsElfServer.lollms_paths.personal_outputs_path/"discussions"/f"d_{discussion_id}"
root_folder.mkdir(parents=True,exist_ok=True)
tmp_file = root_folder/f"ai_code_{message_id}.py"
with open(tmp_file,"w") as f:
f.write(code)
os.system('code ' + str(root_folder))
return {"output": "OK", "execution_time": 0}
except Exception as ex:
trace_exception(ex)
lollmsElfServer.error(ex)
return {"status":False,"error":str(ex)}
@router.post("/open_file")
async def open_file(request: Request):
    """
    Opens a file with the system's default application.
    :param request: The HTTP request object.
    :return: A JSON response with the status of the operation.
    """
    try:
        data = (await request.json())
        path = data.get('path')
        # Windows-only: 'start' hands the file to its default application.
        os.system("start "+path)
return {"output": "OK", "execution_time": 0}
except Exception as ex:
trace_exception(ex)
lollmsElfServer.error(ex)
return {"status":False,"error":str(ex)}
@router.post("/open_code_in_vs_code")
async def open_code_in_vs_code(request: Request):
    """
    Opens code in VS Code.
    :param request: The HTTP request object.
    :return: A JSON response with the status of the operation.
    """
    try:
        data = (await request.json())
        discussion_id = data.get("discussion_id","unknown_discussion")
        message_id = data.get("message_id","unknown_message")
        code = data["code"]
        language = data.get("language","python")
        ASCIIColors.info("Opening code in VS Code:")
        # Write the code to a file inside the discussion's output folder, then open that file in VS Code.
        root_folder = lollmsElfServer.lollms_paths.personal_outputs_path/"discussions"/f"d_{discussion_id}"
        root_folder.mkdir(parents=True,exist_ok=True)
        tmp_file = root_folder/f"ai_code_{message_id}.py"
        with open(tmp_file,"w") as f:
            f.write(code)
        os.system('code ' + str(tmp_file))
return {"output": "OK", "execution_time": 0}
except Exception as ex:
trace_exception(ex)
lollmsElfServer.error(ex)
return {"status":False,"error":str(ex)}

@@ -1 +1 @@
Subproject commit c7ebbb1228a1bc48da1d953629569d60c185a011
Subproject commit fc1598a2f59ceb721a3ef8412c0fc035344226b6


@@ -75,6 +75,7 @@ if __name__ == "__main__":
from endpoints.lollms_message import router as lollms_message_router
from endpoints.lollms_user import router as lollms_user_router
from endpoints.lollms_advanced import router as lollms_advanced_router
from endpoints.chat_bar import router as chat_bar_router
@@ -98,6 +99,8 @@ if __name__ == "__main__":
app.include_router(lollms_message_router)
app.include_router(lollms_user_router)
app.include_router(lollms_advanced_router)
app.include_router(chat_bar_router)
app.include_router(lollms_configuration_infos_router)
@@ -107,6 +110,7 @@ if __name__ == "__main__":
lollms_webui_discussion_events_add(sio)
app.mount("/extensions", StaticFiles(directory=Path(__file__).parent/"web"/"dist", html=True), name="extensions")
app.mount("/playground", StaticFiles(directory=Path(__file__).parent/"web"/"dist", html=True), name="playground")
app.mount("/settings", StaticFiles(directory=Path(__file__).parent/"web"/"dist", html=True), name="settings")
app.mount("/", StaticFiles(directory=Path(__file__).parent/"web"/"dist", html=True), name="static")
@@ -118,16 +122,17 @@ if __name__ == "__main__":
# if autoshow
if config.auto_show_browser:
if config['host']=="0.0.0.0":
#webbrowser.open(f"http://localhost:{config['port']}")
webbrowser.open(f"http://localhost:{6523}") # needed for debug (to be removed in production)
webbrowser.open(f"http://localhost:{config['port']}")
#webbrowser.open(f"http://localhost:{6523}") # needed for debug (to be removed in production)
else:
#webbrowser.open(f"http://{config['host']}:{config['port']}")
webbrowser.open(f"http://{config['host']}:{6523}") # needed for debug (to be removed in production)
webbrowser.open(f"http://{config['host']}:{config['port']}")
#webbrowser.open(f"http://{config['host']}:{6523}") # needed for debug (to be removed in production)
try:
sio.reboot = False
uvicorn.run(app, host=config.host, port=6523)#config.port)
#uvicorn.run(app, host=config.host, port=6523)
uvicorn.run(app, host=config.host, port=config.port)
if sio.reboot:
ASCIIColors.info("")
ASCIIColors.info("")
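The change above restores the configured port and leaves the hard-coded debug port 6523 commented out. If a temporary override is still needed while debugging, an environment variable keeps the production default intact; a sketch (the variable name is made up, not an existing setting):

```python
# Sketch: optional debug-port override without editing the configuration.
import os

def resolve_port(configured_port: int) -> int:
    # LOLLMS_DEBUG_PORT is a hypothetical variable name used only in this sketch.
    return int(os.environ.get("LOLLMS_DEBUG_PORT", configured_port))

# uvicorn.run(app, host=config.host, port=resolve_port(config.port))
```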


@@ -12,7 +12,7 @@ from pydantic import BaseModel
from starlette.responses import StreamingResponse
from lollms.types import MSG_TYPE
from lollms.main_config import BaseConfig
from lollms.utilities import detect_antiprompt, remove_text_from_string, trace_exception, get_trace_exception
from ascii_colors import get_trace_exception, trace_exception
from ascii_colors import ASCIIColors
from api.db import DiscussionsDB
from pathlib import Path
@@ -26,7 +26,7 @@ import json
lollmsElfServer:LOLLMSWebUI = LOLLMSWebUI.get_instance()
def execute_latex(lollmsElfServer:LOLLMSWebUI, code, discussion_id, message_id):
def execute_latex(code, discussion_id, message_id):
def spawn_process(code):
"""Executes Python code and returns the output as JSON."""
@@ -54,7 +54,7 @@ def execute_latex(lollmsElfServer:LOLLMSWebUI, code, discussion_id, message_id):
error_message = result.stderr.strip()
execution_time = time.time() - start_time
error_json = {"output": f"Error occurred while compiling LaTeX: {error_message}", "execution_time": execution_time}
return json.dumps(error_json)
return error_json
# If the compilation is successful, you will get a PDF file
pdf_file = tmp_file.with_suffix('.pdf')
print(f"PDF file generated: {pdf_file}")
@@ -62,14 +62,18 @@ def execute_latex(lollmsElfServer:LOLLMSWebUI, code, discussion_id, message_id):
        except subprocess.CalledProcessError as ex:
            execution_time = time.time() - start_time
            lollmsElfServer.error(f"Error occurred while compiling LaTeX: {ex}")
            error_json = {"output": "<div class='text-red-500'>"+str(ex)+"\n"+get_trace_exception(ex)+"</div>", "execution_time": execution_time}
return json.dumps(error_json)
return error_json
# Stop the timer.
execution_time = time.time() - start_time
# The child process was successful.
pdf_file=str(pdf_file)
url = f"{routing.get_url_path_for(lollmsElfServer.app.router, 'main')[:-4]}{pdf_file[pdf_file.index('outputs'):]}"
pdf_file=str(pdf_file).replace("\\","/")
if not "http" in lollmsElfServer.config.host:
host = "http://"+lollmsElfServer.config.host
else:
host = lollmsElfServer.config.host
url = f"{host}:{lollmsElfServer.config.port}/{pdf_file[pdf_file.index('outputs'):]}"
output_json = {"output": f"Pdf file generated at: {pdf_file}\n<a href='{url}'>Click here to show</a>", "execution_time": execution_time}
return json.dumps(output_json)
return output_json
return spawn_process(code)
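The new return path builds a browser-reachable URL by prefixing the configured host and port to the part of the PDF path that starts at `outputs`. An illustrative walk-through (the path, host and port below are made up):

```python
# Illustrative only: how the URL above is assembled.
pdf_file = "C:/Users/me/lollms/personal_data/outputs/discussions/d_12/latex_file_5.pdf"
host = "http://localhost"   # "http://" is prepended when the configured host lacks it
port = 9600
url = f"{host}:{port}/{pdf_file[pdf_file.index('outputs'):]}"
print(url)  # http://localhost:9600/outputs/discussions/d_12/latex_file_5.pdf
```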


@@ -6,20 +6,9 @@ description:
This is a utility for executing python code
"""
from fastapi import APIRouter, Request, routing
from fastapi import routing
from lollms_webui import LOLLMSWebUI
from pydantic import BaseModel
from starlette.responses import StreamingResponse
from lollms.types import MSG_TYPE
from lollms.main_config import BaseConfig
from lollms.utilities import detect_antiprompt, remove_text_from_string, trace_exception, get_trace_exception
from ascii_colors import ASCIIColors
from api.db import DiscussionsDB
from pathlib import Path
from safe_store.text_vectorizer import TextVectorizer, VectorizationMethod, VisualizationMethod
import tqdm
from fastapi import FastAPI, UploadFile, File
import shutil
from ascii_colors import get_trace_exception, trace_exception
import time
import subprocess
import json
@@ -56,7 +45,7 @@ def execute_python(code, discussion_id, message_id):
execution_time = time.time() - start_time
error_message = f"Error executing Python code: {ex}"
error_json = {"output": "<div class='text-red-500'>"+ex+"\n"+get_trace_exception(ex)+"</div>", "execution_time": execution_time}
return json.dumps(error_json)
return error_json
# Stop the timer.
execution_time = time.time() - start_time
@@ -66,100 +55,9 @@ def execute_python(code, discussion_id, message_id):
# The child process threw an exception.
error_message = f"Error executing Python code: {error.decode('utf8')}"
error_json = {"output": "<div class='text-red-500'>"+error_message+"</div>", "execution_time": execution_time}
return json.dumps(error_json)
return error_json
# The child process was successful.
output_json = {"output": output.decode("utf8"), "execution_time": execution_time}
return json.dumps(output_json)
return spawn_process(code)
def execute_latex(lollmsElfServer:LOLLMSWebUI, code, discussion_id, message_id):
def spawn_process(code):
"""Executes Python code and returns the output as JSON."""
# Start the timer.
start_time = time.time()
# Create a temporary file.
root_folder = lollmsElfServer.lollms_paths.personal_outputs_path/"discussions"/f"d_{discussion_id}"
root_folder.mkdir(parents=True,exist_ok=True)
tmp_file = root_folder/f"latex_file_{message_id}.tex"
with open(tmp_file,"w",encoding="utf8") as f:
f.write(code)
try:
# Determine the pdflatex command based on the provided or default path
if lollmsElfServer.config.pdf_latex_path:
pdflatex_command = lollmsElfServer.config.pdf_latex_path
else:
pdflatex_command = 'pdflatex'
# Set the execution path to the folder containing the tmp_file
execution_path = tmp_file.parent
# Run the pdflatex command with the file path
result = subprocess.run([pdflatex_command, "-interaction=nonstopmode", tmp_file], check=True, capture_output=True, text=True, cwd=execution_path)
# Check the return code of the pdflatex command
if result.returncode != 0:
error_message = result.stderr.strip()
execution_time = time.time() - start_time
error_json = {"output": f"Error occurred while compiling LaTeX: {error_message}", "execution_time": execution_time}
return json.dumps(error_json)
# If the compilation is successful, you will get a PDF file
pdf_file = tmp_file.with_suffix('.pdf')
print(f"PDF file generated: {pdf_file}")
except subprocess.CalledProcessError as ex:
lollmsElfServer.error(f"Error occurred while compiling LaTeX: {ex}")
error_json = {"output": "<div class='text-red-500'>"+str(ex)+"\n"+get_trace_exception(ex)+"</div>", "execution_time": execution_time}
return json.dumps(error_json)
# Stop the timer.
execution_time = time.time() - start_time
# The child process was successful.
pdf_file=str(pdf_file)
url = f"{routing.get_url_path_for(lollmsElfServer.app.router, 'main')[:-4]}{pdf_file[pdf_file.index('outputs'):]}"
output_json = {"output": f"Pdf file generated at: {pdf_file}\n<a href='{url}'>Click here to show</a>", "execution_time": execution_time}
return json.dumps(output_json)
return spawn_process(code)
def execute_bash(lollmsElfServer, code, discussion_id, message_id):
def spawn_process(code):
"""Executes Python code and returns the output as JSON."""
# Start the timer.
start_time = time.time()
# Create a temporary file.
root_folder = lollmsElfServer.lollms_paths.personal_outputs_path/"discussions"/f"d_{discussion_id}"
root_folder.mkdir(parents=True,exist_ok=True)
try:
# Execute the Python code in a temporary file.
process = subprocess.Popen(
code,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
# Get the output and error from the process.
output, error = process.communicate()
except Exception as ex:
# Stop the timer.
execution_time = time.time() - start_time
error_message = f"Error executing Python code: {ex}"
error_json = {"output": "<div class='text-red-500'>"+str(ex)+"\n"+get_trace_exception(ex)+"</div>", "execution_time": execution_time}
return json.dumps(error_json)
# Stop the timer.
execution_time = time.time() - start_time
# Check if the process was successful.
if process.returncode != 0:
# The child process threw an exception.
error_message = f"Error executing Python code: {error.decode('utf8')}"
error_json = {"output": "<div class='text-red-500'>"+error_message+"</div>", "execution_time": execution_time}
return json.dumps(error_json)
# The child process was successful.
output_json = {"output": output.decode("utf8"), "execution_time": execution_time}
return json.dumps(output_json)
return output_json
return spawn_process(code)
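All three execution engines follow the same pattern: run the code in a child process, time it, and return either the captured stdout or a formatted error. Stripped of the LoLLMs specifics, the core of that pattern is roughly (a simplified sketch, not the project's exact code):

```python
# Simplified sketch of the engines' run-and-time pattern.
import subprocess
import time

def run_and_time(command, shell=False):
    start_time = time.time()
    process = subprocess.Popen(
        command,                    # a list, or a string when shell=True
        shell=shell,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    output, error = process.communicate()
    execution_time = time.time() - start_time
    if process.returncode != 0:
        return {"output": error.decode("utf8"), "execution_time": execution_time}
    return {"output": output.decode("utf8"), "execution_time": execution_time}

# Example: run_and_time(["python", "-c", "print('hello')"])
```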


@@ -6,26 +6,14 @@ description:
This is a utility for executing python code
"""
from fastapi import APIRouter, Request, routing
from lollms_webui import LOLLMSWebUI
from pydantic import BaseModel
from starlette.responses import StreamingResponse
from lollms.types import MSG_TYPE
from lollms.main_config import BaseConfig
from lollms.utilities import detect_antiprompt, remove_text_from_string, trace_exception, get_trace_exception
from ascii_colors import ASCIIColors
from api.db import DiscussionsDB
from pathlib import Path
from safe_store.text_vectorizer import TextVectorizer, VectorizationMethod, VisualizationMethod
import tqdm
from fastapi import FastAPI, UploadFile, File
import shutil
from ascii_colors import get_trace_exception, trace_exception
import time
import subprocess
import json
lollmsElfServer:LOLLMSWebUI = LOLLMSWebUI.get_instance()
def execute_bash(lollmsElfServer, code, discussion_id, message_id):
def execute_bash(code, discussion_id, message_id):
def spawn_process(code):
"""Executes Python code and returns the output as JSON."""
@@ -51,7 +39,7 @@ def execute_bash(lollmsElfServer, code, discussion_id, message_id):
execution_time = time.time() - start_time
error_message = f"Error executing Python code: {ex}"
error_json = {"output": "<div class='text-red-500'>"+str(ex)+"\n"+get_trace_exception(ex)+"</div>", "execution_time": execution_time}
return json.dumps(error_json)
return error_json
# Stop the timer.
execution_time = time.time() - start_time
@@ -61,9 +49,9 @@ def execute_bash(lollmsElfServer, code, discussion_id, message_id):
# The child process threw an exception.
error_message = f"Error executing Python code: {error.decode('utf8')}"
error_json = {"output": "<div class='text-red-500'>"+error_message+"</div>", "execution_time": execution_time}
return json.dumps(error_json)
return error_json
# The child process was successful.
output_json = {"output": output.decode("utf8"), "execution_time": execution_time}
return json.dumps(output_json)
return output_json
return spawn_process(code)
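With the `lollmsElfServer` parameter removed and the `json.dumps` calls dropped, the shell engine now takes only the code and the ids and returns a plain dict. A minimal usage sketch (the ids below are arbitrary):

```python
# Usage sketch: execute_bash now returns a dict rather than a JSON string.
result = execute_bash("echo hello", discussion_id="demo", message_id=1)
print(result["output"])           # captured stdout of the shell command
print(result["execution_time"])   # wall-clock seconds spent in the subprocess
```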

@@ -1 +1 @@
Subproject commit 12006786e070ddc17822db3be2217afd9441ff88
Subproject commit 08c176fc621478790a103a37cab11f4082a2fb69