Mirror of https://github.com/ParisNeo/lollms.git, synced 2024-12-24 06:46:40 +00:00
towards working vllm and petals
This commit is contained in: parent 3a3e58e28f, commit 4141260697
@@ -8,15 +8,20 @@ else
echo Installing conda
curl -LO https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
./Miniconda3-latest-Linux-x86_64.sh -b
$HOME/miniconda3/bin/conda init --all
rm ./Miniconda3-latest-Linux-x86_64.sh
echo Done
fi
PATH="$HOME/miniconda3/bin:$PATH"
conda init
export PATH
echo "Installing vllm"
conda create -n vllm python=3.9 -y
conda activate vllm
pip install vllm
echo "Initializing conda"
conda init --all
export PATH
echo "Installing petals"
conda create -n petals python=3.9 -y
echo "Activating petals environment"
source activate petals
pip install petals
git clone https://github.com/ParisNeo/petals_server.git
cd petals_server
pip install -e .
echo "Done"
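Note on activation: this hunk mixes `conda activate` and `source activate`; in a non-interactive script, `conda activate` only works after conda's shell hook has been loaded. A minimal sketch of the usual pattern, assuming the default Miniconda path used above:

    # Load conda's shell functions so `conda activate` works inside a script
    source "$HOME/miniconda3/etc/profile.d/conda.sh"
    conda activate petals
    pip install petals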
@@ -3,7 +3,7 @@
cd ~/vllm
PATH="$HOME/miniconda3/bin:$PATH"
export PATH
conda activate vllm && python -m vllm.entrypoints.openai.api_server --model %1
conda activate vllm && python -m vllm.entrypoints.openai.api_server --model_name "$1" --node_name "$2" --device "$3"

# Wait for all background processes to finish
wait
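After this change the run script takes three positional arguments instead of one. Note that `--model_name`, `--node_name`, and `--device` are not flags of the stock `vllm.entrypoints.openai.api_server` (which takes `--model`), so this presumably targets a patched entrypoint. A hypothetical invocation, assuming the script lives at ~/run_vllm.sh and using illustrative argument values:

    # model id, node name, and device below are examples, not defaults
    bash ~/run_vllm.sh "mistralai/Mistral-7B-v0.1" "node_1" "cuda:0"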
@@ -11,11 +11,13 @@ else
rm ./Miniconda3-latest-Linux-x86_64.sh
echo Done
fi
PATH="$HOME/miniconda3/bin:$PATH"
export PATH
echo "Initializing conda"
$HOME/miniconda3/bin/conda init --all
conda init --all
export PATH
echo "Installing vllm"
$HOME/miniconda3/bin/conda create -n vllm python=3.9 -y
conda create -n vllm python=3.9 -y
echo "Activating vllm environment"
source activate vllm
pip install vllm
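A quick sanity check that the package resolved inside the new environment (a sketch, assuming the `vllm` env created above and Miniconda's bin on PATH):

    source activate vllm
    python -c "import vllm; print(vllm.__version__)"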
@@ -105,9 +105,10 @@ class Service:
        # run vllm
        if platform.system() == 'Windows':
            subprocess.Popen(['wsl', 'bash', '$HOME/run_vllm.sh '])
            #subprocess.Popen(['wsl', 'ls', '$HOME'])
            subprocess.Popen(['wsl', 'bash', '$HOME/run_vllm.sh'])
        else:
            subprocess.Popen(['bash', f'{Path.home()}/run_vllm.sh'])
            subprocess.Popen(['bash', f'{Path.home()}/run_vllm.sh', self.app.config.vllm_model_path])

        # Wait until the service is available at http://127.0.0.1:7860/
        self.wait_for_service(max_retries=wait_max_retries)
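`wait_for_service` blocks until the endpoint answers. The same idea in shell, as a minimal sketch assuming the port 7860 named in the comment and an arbitrary retry budget:

    # Poll until the service responds, up to 30 attempts (hypothetical values)
    for i in $(seq 1 30); do
        curl -sf http://127.0.0.1:7860/ >/dev/null && break
        sleep 2
    done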
@@ -1,9 +1,10 @@
#!/bin/bash

cd ~/vllm
PATH="$HOME/miniconda3/bin:$PATH"
export PATH
source activate vllm && python -m vllm.entrypoints.openai.api_server --model %1
echo "Initializing conda"
$HOME/miniconda3/bin/conda init --all
source activate vllm && python -m vllm.entrypoints.openai.api_server --model "$1"

# Wait for all background processes to finish
wait
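The replaced line fixes a shell bug: `%1` is Windows batch syntax, while bash reads its first positional argument as `"$1"`. A hypothetical invocation with an illustrative model id:

    bash ~/run_vllm.sh "facebook/opt-125m"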