diff --git a/lollms/services/petals/install_petals.sh b/lollms/services/petals/install_petals.sh
index da891f9..14ec027 100644
--- a/lollms/services/petals/install_petals.sh
+++ b/lollms/services/petals/install_petals.sh
@@ -8,15 +8,20 @@ else
     echo Installing conda
     curl -LO https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
     ./Miniconda3-latest-Linux-x86_64.sh -b
-    $HOME/miniconda3/bin/conda init --all
     rm ./Miniconda3-latest-Linux-x86_64.sh
     echo Done
 fi
 PATH="$HOME/miniconda3/bin:$PATH"
-conda init
 export PATH
-echo "Installing vllm"
-conda create -n vllm python=3.9 -y
-conda activate vllm
-pip install vllm
+echo "Initializing conda"
+conda init --all
+export PATH
+echo "Installing petals"
+conda create -n petals python=3.9 -y
+echo "Activating petals environment"
+source activate petals
+pip install petals
+git clone https://github.com/ParisNeo/petals_server.git
+cd petals_server
+pip install -e .
 echo "Done"
diff --git a/lollms/services/petals/run_petals.sh b/lollms/services/petals/run_petals.sh
index 6db2464..ab3b66d 100644
--- a/lollms/services/petals/run_petals.sh
+++ b/lollms/services/petals/run_petals.sh
@@ -3,7 +3,7 @@ cd ~/vllm
 PATH="$HOME/miniconda3/bin:$PATH"
 export PATH
 
-conda activate vllm && python -m vllm.entrypoints.openai.api_server --model %1
+conda activate petals && python -m petals.cli.run_server "$1" --public_name "$2" --device "$3"
 
 # Wait for all background processes to finish
 wait
\ No newline at end of file
diff --git a/lollms/services/vllm/install_vllm.sh b/lollms/services/vllm/install_vllm.sh
index 5c174c0..9cd0925 100644
--- a/lollms/services/vllm/install_vllm.sh
+++ b/lollms/services/vllm/install_vllm.sh
@@ -11,11 +11,13 @@ else
     rm ./Miniconda3-latest-Linux-x86_64.sh
     echo Done
 fi
+PATH="$HOME/miniconda3/bin:$PATH"
+export PATH
 echo "Initializing conda"
-$HOME/miniconda3/bin/conda init --all
+conda init --all
 export PATH
 echo "Installing vllm"
-$HOME/miniconda3/bin/conda create -n vllm python=3.9 -y
+conda create -n vllm python=3.9 -y
 echo "Activating vllm environment"
 source activate vllm
 pip install vllm
diff --git a/lollms/services/vllm/lollms_vllm.py b/lollms/services/vllm/lollms_vllm.py
index cf56d6b..5e479e4 100644
--- a/lollms/services/vllm/lollms_vllm.py
+++ b/lollms/services/vllm/lollms_vllm.py
@@ -105,9 +105,10 @@ class Service:
 
         # run vllm
         if platform.system() == 'Windows':
-            subprocess.Popen(['wsl', 'bash', '$HOME/run_vllm.sh '])
+            #subprocess.Popen(['wsl', 'ls', '$HOME'])
+            subprocess.Popen(['wsl', 'bash', '$HOME/run_vllm.sh', self.app.config.vllm_model_path])
         else:
-            subprocess.Popen(['bash', f'{Path.home()}/run_vllm.sh'])
+            subprocess.Popen(['bash', f'{Path.home()}/run_vllm.sh', self.app.config.vllm_model_path])
 
         # Wait until the service is available at http://127.0.0.1:7860/
         self.wait_for_service(max_retries=wait_max_retries)
diff --git a/lollms/services/vllm/run_vllm.sh b/lollms/services/vllm/run_vllm.sh
index 88c3d35..8c2a42c 100644
--- a/lollms/services/vllm/run_vllm.sh
+++ b/lollms/services/vllm/run_vllm.sh
@@ -1,9 +1,10 @@
 #!/bin/bash
-cd ~/vllm
 PATH="$HOME/miniconda3/bin:$PATH"
 export PATH
 
-source activate vllm && python -m vllm.entrypoints.openai.api_server --model %1
+echo "Initializing conda"
+$HOME/miniconda3/bin/conda init --all
+source activate vllm && python -m vllm.entrypoints.openai.api_server --model "$1"
 
 # Wait for all background processes to finish
 wait
\ No newline at end of file
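
For a quick smoke test of the updated runner scripts, a minimal sketch assuming the installers above have been run and the scripts sit in $HOME (where lollms_vllm.py invokes them); the model names are illustrative, and the petals argument order (model, public node name, device) follows the run_petals.sh positional parameters above:

    # Serve a small model through vllm's OpenAI-compatible API server
    bash "$HOME/run_vllm.sh" "facebook/opt-125m"

    # Join the petals swarm, advertising a node name and a target device
    bash "$HOME/run_petals.sh" "petals-team/StableBeluga2" "my-lollms-node" "cuda:0"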