Towards working vLLM and Petals

This commit is contained in:
Saifeddine ALOUI 2024-02-20 00:24:40 +01:00
parent 3a3e58e28f
commit 4141260697
5 changed files with 22 additions and 13 deletions

View File

@@ -8,15 +8,20 @@ else
 echo Installing conda
 curl -LO https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
 ./Miniconda3-latest-Linux-x86_64.sh -b
+$HOME/miniconda3/bin/conda init --all
 rm ./Miniconda3-latest-Linux-x86_64.sh
 echo Done
 fi
 PATH="$HOME/miniconda3/bin:$PATH"
-conda init
 export PATH
-echo "Installing vllm"
-conda create -n vllm python=3.9 -y
-conda activate vllm
-pip install vllm
+echo "Initializing conda"
+conda init --all
+export PATH
+echo "Installing petals"
+conda create -n petals python=3.9 -y
+echo "Activating petals environment"
+source activate petals
+pip install petals
+git clone https://github.com/ParisNeo/petals_server.git
+cd petals_server
+pip install -e .
 echo "Done"

View File

@@ -3,7 +3,7 @@
 cd ~/vllm
 PATH="$HOME/miniconda3/bin:$PATH"
 export PATH
-conda activate vllm && python -m vllm.entrypoints.openai.api_server --model %1
+conda activate vllm && python -m vllm.entrypoints.openai.api_server --model_name "$1" --node_name "$2" --device "$3"
 # Wait for all background processes to finish
 wait
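
For context, %1 is cmd.exe batch syntax for the first argument; in bash the positional parameters are "$1", "$2", "$3", quoted here so values containing spaces survive. A hypothetical invocation of this runner, assuming the three arguments are a model name, a node name, and a device (the script path and values are illustrative, not from the commit):

bash "$HOME/run_petals.sh" "petals-team/StableBeluga2" "my_node" "cuda:0"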

View File

@@ -11,11 +11,13 @@ else
 rm ./Miniconda3-latest-Linux-x86_64.sh
 echo Done
 fi
 PATH="$HOME/miniconda3/bin:$PATH"
 export PATH
-conda init --all
+echo "Initializing conda"
+$HOME/miniconda3/bin/conda init --all
+export PATH
 echo "Installing vllm"
-conda create -n vllm python=3.9 -y
+$HOME/miniconda3/bin/conda create -n vllm python=3.9 -y
 echo "Activating vllm environment"
 source activate vllm
 pip install vllm
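
The switch to absolute paths sidesteps a common pitfall: conda init only edits shell startup files, so it does not make the bare conda command available to the rest of the currently running script. A minimal sketch of the same idea with the path factored out, assuming the default Miniconda location:

# Call the binary directly; PATH changes from conda init apply only to new shells
CONDA_BIN="$HOME/miniconda3/bin/conda"
"$CONDA_BIN" init --all
"$CONDA_BIN" create -n vllm python=3.9 -y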

View File

@@ -105,9 +105,10 @@ class Service:
         # run vllm
         if platform.system() == 'Windows':
-            subprocess.Popen(['wsl', 'bash', '$HOME/run_vllm.sh '])
             #subprocess.Popen(['wsl', 'ls', '$HOME'])
+            subprocess.Popen(['wsl', 'bash', '$HOME/run_vllm.sh'])
         else:
-            subprocess.Popen(['bash', f'{Path.home()}/run_vllm.sh'])
+            subprocess.Popen(['bash', f'{Path.home()}/run_vllm.sh', self.app.config.vllm_model_path])
         # Wait until the service is available at http://127.0.0.1:7860/
         self.wait_for_service(max_retries=wait_max_retries)
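
wait_for_service itself is not shown in this diff; a hypothetical shell equivalent of the polling it describes, assuming the service answers on http://127.0.0.1:7860/ once up (the retry count and sleep interval are illustrative):

# Poll the endpoint until it responds or retries run out
for attempt in $(seq 1 30); do
    curl -fsS http://127.0.0.1:7860/ >/dev/null 2>&1 && break
    sleep 2
done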

View File

@@ -1,9 +1,10 @@
 #!/bin/bash
 cd ~/vllm
 PATH="$HOME/miniconda3/bin:$PATH"
 export PATH
-source activate vllm && python -m vllm.entrypoints.openai.api_server --model %1
+echo "Initializing conda"
+$HOME/miniconda3/bin/conda init --all
+source activate vllm && python -m vllm.entrypoints.openai.api_server --model "$1"
 # Wait for all background processes to finish
 wait
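
Here the model id arrives as "$1", supplied by the Service class above via self.app.config.vllm_model_path. A hypothetical smoke test for this runner, assuming vLLM's OpenAI-compatible server comes up on its default port 8000 (the model id is an example, not from the commit):

bash "$HOME/run_vllm.sh" "facebook/opt-125m" &
# Once the server is up, request a completion through the OpenAI-style endpoint
curl http://127.0.0.1:8000/v1/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "facebook/opt-125m", "prompt": "Hello, my name is", "max_tokens": 16}'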