diff --git a/lollms/server/endpoints/lollms_binding_infos.py b/lollms/server/endpoints/lollms_binding_infos.py
index 81001ff..af3cfda 100644
--- a/lollms/server/endpoints/lollms_binding_infos.py
+++ b/lollms/server/endpoints/lollms_binding_infos.py
@@ -88,8 +88,8 @@
 async def reload_binding(request: Request):
     try:
         data = (await request.json())
-        print(f"Roloading binding selected : {data['binding_name']}")
-        lollmsElfServer.config["binding_name"]=data['binding_name']
+        print(f"Reloading binding selected: {data['name']}")
+        lollmsElfServer.config["binding_name"] = data['name']
         if lollmsElfServer.binding:
             lollmsElfServer.binding.destroy_model()
         lollmsElfServer.binding = None
@@ -102,6 +102,7 @@
             lollmsElfServer.model = None
         lollmsElfServer.config.save_config()
         ASCIIColors.green("Binding loaded successfully")
+        return {"status": True}
     except Exception as ex:
         ASCIIColors.error(f"Couldn't build binding: [{ex}]")
         trace_exception(ex)
diff --git a/lollms/services/vllm/install_vllm.sh b/lollms/services/vllm/install_vllm.sh
index 8228405..02983f1 100644
--- a/lollms/services/vllm/install_vllm.sh
+++ b/lollms/services/vllm/install_vllm.sh
@@ -1,2 +1,16 @@
-#!/bin/sh
-conda create -n myenv python=3.9 -y && conda activate myenv && pip install vllm --user
\ No newline at end of file
+#!/bin/bash
+
+# Check if miniconda3/bin/conda exists
+if [ -e "$HOME/miniconda3/bin/conda" ]; then
+    echo "Conda is installed!"
+else
+    echo "Conda is not installed. Installing it now."
+    curl -LOk https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
+    bash Miniconda3-latest-Linux-x86_64.sh -b
+    "$HOME/miniconda3/bin/conda" init
+    echo "Done"
+fi
+PATH="$HOME/miniconda3/bin:$PATH"
+export PATH
+source "$HOME/miniconda3/etc/profile.d/conda.sh"
+conda create -n vllm python=3.9 -y && conda activate vllm && pip install vllm
\ No newline at end of file
diff --git a/lollms/services/vllm/run_vllm.sh b/lollms/services/vllm/run_vllm.sh
index e61578f..6db2464 100644
--- a/lollms/services/vllm/run_vllm.sh
+++ b/lollms/services/vllm/run_vllm.sh
@@ -1,8 +1,10 @@
 #!/bin/bash
 cd ~/vllm
-
-python -m vllm.entrypoints.openai.api_server --model %1
+PATH="$HOME/miniconda3/bin:$PATH"
+export PATH
+source "$HOME/miniconda3/etc/profile.d/conda.sh"
+conda activate vllm && python -m vllm.entrypoints.openai.api_server --model "$1"
 
 
 # Wait for all background processes to finish
 wait
\ No newline at end of file
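
Note: with this change, POST bodies for the binding-reload endpoint must carry the binding under the "name" key instead of "binding_name". A minimal smoke test, assuming the route is mounted at /reload_binding on the default lollms port 9600 and using a hypothetical binding name (neither the route path nor the port is visible in this hunk):

    curl -X POST http://localhost:9600/reload_binding \
         -H "Content-Type: application/json" \
         -d '{"name": "ollama"}'

On success the endpoint now returns {"status": true} rather than an empty body, so callers can check the result.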
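
Likewise, a sketch of how the updated vLLM scripts would be chained, with a placeholder model id (the scripts themselves pin no model):

    bash lollms/services/vllm/install_vllm.sh
    bash lollms/services/vllm/run_vllm.sh mistralai/Mistral-7B-Instruct-v0.2

run_vllm.sh forwards its first argument to --model, so any Hugging Face model id that vLLM supports can be passed.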