fixed some bugs and added execution engines

This commit is contained in:
Saifeddine ALOUI 2024-01-07 23:17:22 +01:00
parent 448d90b25e
commit de8adeb8d4
12 changed files with 575 additions and 198 deletions

4
app.py
View File

@ -649,8 +649,8 @@ try:
pdf_file = tmp_file.with_suffix('.pdf') pdf_file = tmp_file.with_suffix('.pdf')
print(f"PDF file generated: {pdf_file}") print(f"PDF file generated: {pdf_file}")
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as ex:
self.error(f"Error occurred while compiling LaTeX: {e}") self.error(f"Error occurred while compiling LaTeX: {ex}")
error_json = {"output": "<div class='text-red-500'>"+str(ex)+"\n"+get_trace_exception(ex)+"</div>", "execution_time": execution_time} error_json = {"output": "<div class='text-red-500'>"+str(ex)+"\n"+get_trace_exception(ex)+"</div>", "execution_time": execution_time}
return json.dumps(error_json) return json.dumps(error_json)

View File

@ -0,0 +1,62 @@
"""
project: lollms_webui
file: lollms_advanced.py
author: ParisNeo
description:
This module contains a set of FastAPI routes that provide information about the Lord of Large Language and Multimodal Systems (LoLLMs) Web UI
application. These routes allow users to do advanced stuff like executing code.
"""
from fastapi import APIRouter, Request
from lollms_webui import LOLLMSWebUI
from pydantic import BaseModel
from starlette.responses import StreamingResponse
from lollms.types import MSG_TYPE
from lollms.main_config import BaseConfig
from lollms.utilities import detect_antiprompt, remove_text_from_string, trace_exception
from ascii_colors import ASCIIColors
from api.db import DiscussionsDB
from pathlib import Path
from safe_store.text_vectorizer import TextVectorizer, VectorizationMethod, VisualizationMethod
import tqdm
from fastapi import FastAPI, UploadFile, File
import shutil
from utilities.execution_engines.python_execution_engine import execute_python
from utilities.execution_engines.latex_execution_engine import execute_latex
from utilities.execution_engines.shell_execution_engine import execute_bash
router = APIRouter()
lollmsElfServer:LOLLMSWebUI = LOLLMSWebUI.get_instance()
@router.post("/execute_code")
async def execute_code(request: Request):
    """
    Executes code submitted by the client and returns its output.

    Expected JSON payload:
        code:          the source code to execute (required).
        discussion_id: id of the owning discussion (default "unknown_discussion").
        message_id:    id of the owning message (default "unknown_message").
        language:      "python", "latex", or one of "bash"/"shell"/"cmd"/"powershell"
                       (default "python").

    :param request: The HTTP request object.
    :return: The engine's JSON result, or a status dict on failure.
    """
    try:
        data = await request.json()
        code = data["code"]
        discussion_id = data.get("discussion_id", "unknown_discussion")
        message_id = data.get("message_id", "unknown_message")
        language = data.get("language", "python")

        # Report the language actually being executed (the original always said "python").
        ASCIIColors.info(f"Executing {language} code:")
        ASCIIColors.yellow(code)
        if language == "python":
            return execute_python(code, discussion_id, message_id)
        elif language == "latex":
            # Bug fix: the latex/bash engines take the server instance as their
            # first argument; calling them without it raised a TypeError.
            return execute_latex(lollmsElfServer, code, discussion_id, message_id)
        elif language in ["bash", "shell", "cmd", "powershell"]:
            return execute_bash(lollmsElfServer, code, discussion_id, message_id)
        return {"output": "Unsupported language", "execution_time": 0}
    except Exception as ex:
        trace_exception(ex)
        lollmsElfServer.error(ex)
        return {"status": False, "error": str(ex)}

@ -1 +1 @@
Subproject commit fb402a5d3d043efbecd7199ea45927d6ad37af6c Subproject commit c7ebbb1228a1bc48da1d953629569d60c185a011

View File

@ -74,6 +74,7 @@ if __name__ == "__main__":
from endpoints.lollms_discussion import router as lollms_discussion_router from endpoints.lollms_discussion import router as lollms_discussion_router
from endpoints.lollms_message import router as lollms_message_router from endpoints.lollms_message import router as lollms_message_router
from endpoints.lollms_user import router as lollms_user_router from endpoints.lollms_user import router as lollms_user_router
from endpoints.lollms_advanced import router as lollms_advanced_router
@ -90,11 +91,13 @@ if __name__ == "__main__":
app.include_router(lollms_extensions_infos_router) app.include_router(lollms_extensions_infos_router)
app.include_router(lollms_webui_infos_router) app.include_router(lollms_webui_infos_router)
app.include_router(lollms_generator_router) app.include_router(lollms_generator_router)
app.include_router(lollms_discussion_router) app.include_router(lollms_discussion_router)
app.include_router(lollms_message_router) app.include_router(lollms_message_router)
app.include_router(lollms_user_router) app.include_router(lollms_user_router)
app.include_router(lollms_advanced_router)
app.include_router(lollms_configuration_infos_router) app.include_router(lollms_configuration_infos_router)
@ -109,6 +112,8 @@ if __name__ == "__main__":
app.mount("/", StaticFiles(directory=Path(__file__).parent/"web"/"dist", html=True), name="static") app.mount("/", StaticFiles(directory=Path(__file__).parent/"web"/"dist", html=True), name="static")
app = ASGIApp(socketio_server=sio, other_asgi_app=app) app = ASGIApp(socketio_server=sio, other_asgi_app=app)
lollmsElfServer.app = app
# if autoshow # if autoshow
if config.auto_show_browser: if config.auto_show_browser:

View File

@ -0,0 +1,75 @@
"""
project: lollms_webui
file: latex_execution_engine.py
author: ParisNeo
description:
This is a utility for executing latex code
"""
from fastapi import APIRouter, Request, routing
from lollms_webui import LOLLMSWebUI
from pydantic import BaseModel
from starlette.responses import StreamingResponse
from lollms.types import MSG_TYPE
from lollms.main_config import BaseConfig
from lollms.utilities import detect_antiprompt, remove_text_from_string, trace_exception, get_trace_exception
from ascii_colors import ASCIIColors
from api.db import DiscussionsDB
from pathlib import Path
from safe_store.text_vectorizer import TextVectorizer, VectorizationMethod, VisualizationMethod
import tqdm
from fastapi import FastAPI, UploadFile, File
import shutil
import time
import subprocess
import json
lollmsElfServer:LOLLMSWebUI = LOLLMSWebUI.get_instance()
def execute_latex(lollmsElfServer:LOLLMSWebUI, code, discussion_id, message_id):
    """
    Compiles LaTeX code with pdflatex and returns the result as a JSON string.

    :param lollmsElfServer: the LOLLMSWebUI server instance (paths, config, app).
    :param code: the LaTeX source to compile.
    :param discussion_id: id used to pick the output folder.
    :param message_id: id used to name the .tex file.
    :return: JSON string with "output" and "execution_time" keys.
    """
    def spawn_process(code):
        """Writes the code to a .tex file, runs pdflatex, and returns JSON."""
        # Start the timer.
        start_time = time.time()
        # Write the LaTeX source into the discussion's output folder.
        root_folder = lollmsElfServer.lollms_paths.personal_outputs_path/"discussions"/f"d_{discussion_id}"
        root_folder.mkdir(parents=True, exist_ok=True)
        tmp_file = root_folder/f"latex_file_{message_id}.tex"
        with open(tmp_file, "w", encoding="utf8") as f:
            f.write(code)
        try:
            # Use the configured pdflatex path if provided, else rely on PATH.
            if lollmsElfServer.config.pdf_latex_path:
                pdflatex_command = lollmsElfServer.config.pdf_latex_path
            else:
                pdflatex_command = 'pdflatex'
            # Compile inside the folder containing the .tex file.
            execution_path = tmp_file.parent
            # Bug fix: check=True made the returncode branch below unreachable
            # (a non-zero exit raised CalledProcessError instead). Handle the
            # return code explicitly so stderr can be reported.
            result = subprocess.run(
                [pdflatex_command, "-interaction=nonstopmode", str(tmp_file)],
                capture_output=True, text=True, cwd=execution_path
            )
            if result.returncode != 0:
                error_message = result.stderr.strip()
                execution_time = time.time() - start_time
                error_json = {"output": f"Error occurred while compiling LaTeX: {error_message}", "execution_time": execution_time}
                return json.dumps(error_json)
            # If the compilation is successful, you will get a PDF file
            pdf_file = tmp_file.with_suffix('.pdf')
            print(f"PDF file generated: {pdf_file}")
        except Exception as ex:
            # Bug fix: execution_time was referenced before assignment here.
            # Catch Exception (not just CalledProcessError) so a missing
            # pdflatex binary (FileNotFoundError) is also reported as JSON.
            execution_time = time.time() - start_time
            lollmsElfServer.error(f"Error occurred while compiling LaTeX: {ex}")
            error_json = {"output": "<div class='text-red-500'>"+str(ex)+"\n"+get_trace_exception(ex)+"</div>", "execution_time": execution_time}
            return json.dumps(error_json)
        # Stop the timer.
        execution_time = time.time() - start_time
        # Build a URL pointing at the generated PDF inside the outputs folder.
        pdf_file = str(pdf_file)
        url = f"{routing.get_url_path_for(lollmsElfServer.app.router, 'main')[:-4]}{pdf_file[pdf_file.index('outputs'):]}"
        output_json = {"output": f"Pdf file generated at: {pdf_file}\n<a href='{url}'>Click here to show</a>", "execution_time": execution_time}
        return json.dumps(output_json)
    return spawn_process(code)

View File

@ -0,0 +1,165 @@
"""
project: lollms_webui
file: python_execution_engine.py
author: ParisNeo
description:
This is a utility for executing python code
"""
from fastapi import APIRouter, Request, routing
from lollms_webui import LOLLMSWebUI
from pydantic import BaseModel
from starlette.responses import StreamingResponse
from lollms.types import MSG_TYPE
from lollms.main_config import BaseConfig
from lollms.utilities import detect_antiprompt, remove_text_from_string, trace_exception, get_trace_exception
from ascii_colors import ASCIIColors
from api.db import DiscussionsDB
from pathlib import Path
from safe_store.text_vectorizer import TextVectorizer, VectorizationMethod, VisualizationMethod
import tqdm
from fastapi import FastAPI, UploadFile, File
import shutil
import time
import subprocess
import json
lollmsElfServer:LOLLMSWebUI = LOLLMSWebUI.get_instance()
def execute_python(code, discussion_id, message_id):
    """
    Executes Python code in a subprocess and returns the result as a JSON string.

    :param code: the Python source to run.
    :param discussion_id: id used to pick the output folder.
    :param message_id: id used to name the generated .py file.
    :return: JSON string with "output" and "execution_time" keys.
    """
    def spawn_process(code):
        """Writes the code to a file, runs it with python, and returns JSON."""
        # Start the timer.
        start_time = time.time()
        # Write the code into the discussion's output folder.
        root_folder = lollmsElfServer.lollms_paths.personal_outputs_path/"discussions"/f"d_{discussion_id}"
        root_folder.mkdir(parents=True, exist_ok=True)
        tmp_file = root_folder/f"ai_code_{message_id}.py"
        with open(tmp_file, "w", encoding="utf8") as f:
            f.write(code)
        try:
            # Run the code in a child interpreter, rooted in the output folder.
            process = subprocess.Popen(
                ["python", str(tmp_file)],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                cwd=root_folder
            )
            # Get the output and error from the process.
            output, error = process.communicate()
        except Exception as ex:
            # Stop the timer.
            execution_time = time.time() - start_time
            error_message = f"Error executing Python code: {ex}"
            # Bug fix: the original concatenated the Exception object itself
            # ("…"+ex+"…"), which raises TypeError; use the formatted message.
            error_json = {"output": "<div class='text-red-500'>"+error_message+"\n"+get_trace_exception(ex)+"</div>", "execution_time": execution_time}
            return json.dumps(error_json)
        # Stop the timer.
        execution_time = time.time() - start_time
        # A non-zero exit code means the child raised or exited with an error.
        if process.returncode != 0:
            error_message = f"Error executing Python code: {error.decode('utf8')}"
            error_json = {"output": "<div class='text-red-500'>"+error_message+"</div>", "execution_time": execution_time}
            return json.dumps(error_json)
        # The child process was successful.
        output_json = {"output": output.decode("utf8"), "execution_time": execution_time}
        return json.dumps(output_json)
    return spawn_process(code)
def execute_latex(lollmsElfServer:LOLLMSWebUI, code, discussion_id, message_id):
    """
    Compiles LaTeX code with pdflatex and returns the result as a JSON string.

    :param lollmsElfServer: the LOLLMSWebUI server instance (paths, config, app).
    :param code: the LaTeX source to compile.
    :param discussion_id: id used to pick the output folder.
    :param message_id: id used to name the .tex file.
    :return: JSON string with "output" and "execution_time" keys.
    """
    def spawn_process(code):
        """Writes the code to a .tex file, runs pdflatex, and returns JSON."""
        # Start the timer.
        start_time = time.time()
        # Write the LaTeX source into the discussion's output folder.
        root_folder = lollmsElfServer.lollms_paths.personal_outputs_path/"discussions"/f"d_{discussion_id}"
        root_folder.mkdir(parents=True, exist_ok=True)
        tmp_file = root_folder/f"latex_file_{message_id}.tex"
        with open(tmp_file, "w", encoding="utf8") as f:
            f.write(code)
        try:
            # Use the configured pdflatex path if provided, else rely on PATH.
            if lollmsElfServer.config.pdf_latex_path:
                pdflatex_command = lollmsElfServer.config.pdf_latex_path
            else:
                pdflatex_command = 'pdflatex'
            # Compile inside the folder containing the .tex file.
            execution_path = tmp_file.parent
            # Bug fix: check=True made the returncode branch below unreachable
            # (a non-zero exit raised CalledProcessError instead). Handle the
            # return code explicitly so stderr can be reported.
            result = subprocess.run(
                [pdflatex_command, "-interaction=nonstopmode", str(tmp_file)],
                capture_output=True, text=True, cwd=execution_path
            )
            if result.returncode != 0:
                error_message = result.stderr.strip()
                execution_time = time.time() - start_time
                error_json = {"output": f"Error occurred while compiling LaTeX: {error_message}", "execution_time": execution_time}
                return json.dumps(error_json)
            # If the compilation is successful, you will get a PDF file
            pdf_file = tmp_file.with_suffix('.pdf')
            print(f"PDF file generated: {pdf_file}")
        except Exception as ex:
            # Bug fix: execution_time was referenced before assignment here.
            # Catch Exception (not just CalledProcessError) so a missing
            # pdflatex binary (FileNotFoundError) is also reported as JSON.
            execution_time = time.time() - start_time
            lollmsElfServer.error(f"Error occurred while compiling LaTeX: {ex}")
            error_json = {"output": "<div class='text-red-500'>"+str(ex)+"\n"+get_trace_exception(ex)+"</div>", "execution_time": execution_time}
            return json.dumps(error_json)
        # Stop the timer.
        execution_time = time.time() - start_time
        # Build a URL pointing at the generated PDF inside the outputs folder.
        pdf_file = str(pdf_file)
        url = f"{routing.get_url_path_for(lollmsElfServer.app.router, 'main')[:-4]}{pdf_file[pdf_file.index('outputs'):]}"
        output_json = {"output": f"Pdf file generated at: {pdf_file}\n<a href='{url}'>Click here to show</a>", "execution_time": execution_time}
        return json.dumps(output_json)
    return spawn_process(code)
def execute_bash(lollmsElfServer, code, discussion_id, message_id):
    """
    Executes a shell command line and returns the result as a JSON string.

    SECURITY NOTE: this runs arbitrary user-supplied text through the shell
    (shell=True). That is this engine's purpose, but it must only ever be
    exposed to trusted local users.

    :param lollmsElfServer: the LOLLMSWebUI server instance (paths).
    :param code: the shell command(s) to run.
    :param discussion_id: id used to pick the working folder.
    :param message_id: id of the owning message (kept for interface symmetry).
    :return: JSON string with "output" and "execution_time" keys.
    """
    def spawn_process(code):
        """Runs the command in a shell and returns the output as JSON."""
        # Start the timer.
        start_time = time.time()
        # Prepare the discussion's output folder.
        root_folder = lollmsElfServer.lollms_paths.personal_outputs_path/"discussions"/f"d_{discussion_id}"
        root_folder.mkdir(parents=True, exist_ok=True)
        try:
            # Bug fix: root_folder was created but unused; run the command
            # there (cwd) like the python engine does.
            process = subprocess.Popen(
                code,
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                cwd=root_folder
            )
            # Get the output and error from the process.
            output, error = process.communicate()
        except Exception as ex:
            # Stop the timer.
            execution_time = time.time() - start_time
            # Bug fix: messages said "Python code" in the bash engine.
            error_message = f"Error executing bash code: {ex}"
            error_json = {"output": "<div class='text-red-500'>"+str(ex)+"\n"+get_trace_exception(ex)+"</div>", "execution_time": execution_time}
            return json.dumps(error_json)
        # Stop the timer.
        execution_time = time.time() - start_time
        # A non-zero exit code means the command failed.
        if process.returncode != 0:
            error_message = f"Error executing bash code: {error.decode('utf8')}"
            error_json = {"output": "<div class='text-red-500'>"+error_message+"</div>", "execution_time": execution_time}
            return json.dumps(error_json)
        # The child process was successful.
        output_json = {"output": output.decode("utf8"), "execution_time": execution_time}
        return json.dumps(output_json)
    return spawn_process(code)

View File

@ -0,0 +1,69 @@
"""
project: lollms_webui
file: shell_execution_engine.py
author: ParisNeo
description:
This is a utility for executing python code
"""
from fastapi import APIRouter, Request, routing
from lollms_webui import LOLLMSWebUI
from pydantic import BaseModel
from starlette.responses import StreamingResponse
from lollms.types import MSG_TYPE
from lollms.main_config import BaseConfig
from lollms.utilities import detect_antiprompt, remove_text_from_string, trace_exception, get_trace_exception
from ascii_colors import ASCIIColors
from api.db import DiscussionsDB
from pathlib import Path
from safe_store.text_vectorizer import TextVectorizer, VectorizationMethod, VisualizationMethod
import tqdm
from fastapi import FastAPI, UploadFile, File
import shutil
import time
import subprocess
import json
lollmsElfServer:LOLLMSWebUI = LOLLMSWebUI.get_instance()
def execute_bash(lollmsElfServer, code, discussion_id, message_id):
    """
    Executes a shell command line and returns the result as a JSON string.

    SECURITY NOTE: this runs arbitrary user-supplied text through the shell
    (shell=True). That is this engine's purpose, but it must only ever be
    exposed to trusted local users.

    :param lollmsElfServer: the LOLLMSWebUI server instance (paths).
    :param code: the shell command(s) to run.
    :param discussion_id: id used to pick the working folder.
    :param message_id: id of the owning message (kept for interface symmetry).
    :return: JSON string with "output" and "execution_time" keys.
    """
    def spawn_process(code):
        """Runs the command in a shell and returns the output as JSON."""
        # Start the timer.
        start_time = time.time()
        # Prepare the discussion's output folder.
        root_folder = lollmsElfServer.lollms_paths.personal_outputs_path/"discussions"/f"d_{discussion_id}"
        root_folder.mkdir(parents=True, exist_ok=True)
        try:
            # Bug fix: root_folder was created but unused; run the command
            # there (cwd) like the python engine does.
            process = subprocess.Popen(
                code,
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                cwd=root_folder
            )
            # Get the output and error from the process.
            output, error = process.communicate()
        except Exception as ex:
            # Stop the timer.
            execution_time = time.time() - start_time
            # Bug fix: messages said "Python code" in the bash engine.
            error_message = f"Error executing bash code: {ex}"
            error_json = {"output": "<div class='text-red-500'>"+str(ex)+"\n"+get_trace_exception(ex)+"</div>", "execution_time": execution_time}
            return json.dumps(error_json)
        # Stop the timer.
        execution_time = time.time() - start_time
        # A non-zero exit code means the command failed.
        if process.returncode != 0:
            error_message = f"Error executing bash code: {error.decode('utf8')}"
            error_json = {"output": "<div class='text-red-500'>"+error_message+"</div>", "execution_time": execution_time}
            return json.dumps(error_json)
        # The child process was successful.
        output_json = {"output": output.decode("utf8"), "execution_time": execution_time}
        return json.dumps(output_json)
    return spawn_process(code)

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

4
web/dist/index.html vendored
View File

@ -6,8 +6,8 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0"> <meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>LoLLMS WebUI - Welcome</title> <title>LoLLMS WebUI - Welcome</title>
<script type="module" crossorigin src="/assets/index-db831b95.js"></script> <script type="module" crossorigin src="/assets/index-a1f2945d.js"></script>
<link rel="stylesheet" href="/assets/index-d5a593e6.css"> <link rel="stylesheet" href="/assets/index-299ef7d5.css">
</head> </head>
<body> <body>
<div id="app"></div> <div id="app"></div>

View File

@ -513,16 +513,16 @@ app.mixin({
await this.$store.dispatch('refreshConfig'); await this.$store.dispatch('refreshConfig');
console.log("Config ready") console.log("Config ready")
} }
catch{ catch (ex){
console.log("Error cought:", ex)
} }
try{ try{
this.$store.state.loading_infos = "Loading Database" this.$store.state.loading_infos = "Loading Database"
this.$store.state.loading_progress = 20 this.$store.state.loading_progress = 20
await this.$store.dispatch('refreshDatabase'); await this.$store.dispatch('refreshDatabase');
} }
catch{ catch (ex){
console.log("Error cought:", ex)
} }
try{ try{
@ -530,8 +530,8 @@ app.mixin({
this.$store.state.loading_progress = 30 this.$store.state.loading_progress = 30
await this.$store.dispatch('getVersion'); await this.$store.dispatch('getVersion');
} }
catch{ catch (ex){
console.log("Error cought:", ex)
} }
try{ try{
@ -539,31 +539,31 @@ app.mixin({
this.$store.state.loading_progress = 40 this.$store.state.loading_progress = 40
await this.$store.dispatch('refreshBindings'); await this.$store.dispatch('refreshBindings');
} }
catch{ catch (ex){
console.log("Error cought:", ex)
} }
try{ try{
this.$store.state.loading_infos = "Getting Hardware usage" this.$store.state.loading_infos = "Getting Hardware usage"
await refreshHardwareUsage(this.$store); await refreshHardwareUsage(this.$store);
} }
catch{ catch (ex){
console.log("Error cought:", ex)
} }
try{ try{
this.$store.state.loading_infos = "Getting extensions zoo" this.$store.state.loading_infos = "Getting extensions zoo"
this.$store.state.loading_progress = 50 this.$store.state.loading_progress = 50
await this.$store.dispatch('refreshExtensionsZoo'); await this.$store.dispatch('refreshExtensionsZoo');
} }
catch{ catch (ex){
console.log("Error cought:", ex)
} }
try{ try{
this.$store.state.loading_infos = "Getting mounted extensions" this.$store.state.loading_infos = "Getting mounted extensions"
this.$store.state.loading_progress = 60 this.$store.state.loading_progress = 60
await this.$store.dispatch('refreshmountedExtensions'); await this.$store.dispatch('refreshmountedExtensions');
} }
catch{ catch (ex){
console.log("Error cought:", ex)
} }
try{ try{
@ -571,16 +571,16 @@ app.mixin({
this.$store.state.loading_progress = 70 this.$store.state.loading_progress = 70
await this.$store.dispatch('refreshPersonalitiesZoo') await this.$store.dispatch('refreshPersonalitiesZoo')
} }
catch{ catch (ex){
console.log("Error cought:", ex)
} }
try{ try{
this.$store.state.loading_infos = "Getting mounted personalities" this.$store.state.loading_infos = "Getting mounted personalities"
this.$store.state.loading_progress = 80 this.$store.state.loading_progress = 80
await this.$store.dispatch('refreshMountedPersonalities'); await this.$store.dispatch('refreshMountedPersonalities');
} }
catch{ catch (ex){
console.log("Error cought:", ex)
} }
try{ try{
@ -588,8 +588,8 @@ app.mixin({
this.$store.state.loading_progress = 90 this.$store.state.loading_progress = 90
await this.$store.dispatch('refreshModelsZoo'); await this.$store.dispatch('refreshModelsZoo');
} }
catch{ catch (ex){
console.log("Error cought:", ex)
} }
try{ try{
this.$store.state.loading_infos = "Getting active models" this.$store.state.loading_infos = "Getting active models"
@ -597,8 +597,8 @@ app.mixin({
await this.$store.dispatch('refreshModels'); await this.$store.dispatch('refreshModels');
await this.$store.dispatch('refreshModelStatus'); await this.$store.dispatch('refreshModelStatus');
} }
catch{ catch (ex){
console.log("Error cought:", ex)
} }

View File

@ -3835,6 +3835,7 @@ export default {
async unmountAll(){ async unmountAll(){
await axios.get('/unmount_all_personalities'); await axios.get('/unmount_all_personalities');
this.$store.dispatch('refreshMountedPersonalities'); this.$store.dispatch('refreshMountedPersonalities');
this.$store.dispatch('refreshConfig');
this.$store.state.toast.showToast("All personas unmounted", 4, true) this.$store.state.toast.showToast("All personas unmounted", 4, true)
}, },
async unmountPersonality(pers) { async unmountPersonality(pers) {