diff --git a/lollms/services/ollama/lollms_ollama.py b/lollms/services/ollama/lollms_ollama.py
index 804e7f1..8b3d4c8 100644
--- a/lollms/services/ollama/lollms_ollama.py
+++ b/lollms/services/ollama/lollms_ollama.py
@@ -86,9 +86,9 @@ class Service:
         # run ollama
         if platform.system() == 'Windows':
-            subprocess.run(['wsl', 'bash', '~/run_ollama.sh'])
+            subprocess.Popen(['wsl', 'bash', '~/run_ollama.sh'])
         else:
-            subprocess.run(['bash', '~/run_ollama.sh'])
+            subprocess.Popen(['bash', '~/run_ollama.sh'])
diff --git a/lollms/services/ollama/run_ollama.sh b/lollms/services/ollama/run_ollama.sh
index 23a8e32..e457e76 100644
--- a/lollms/services/ollama/run_ollama.sh
+++ b/lollms/services/ollama/run_ollama.sh
@@ -4,12 +4,12 @@
 OLLAMA_HOST="0.0.0.0:11434"

 # Start the OLLAMA server
-ollama serve &
+OLLAMA_MODELS=~/ollama/models ollama serve &

 # Check if models.txt exists
 if [ ! -f models.txt ]; then
     # Create models.txt and add "mixtral" to it
-    echo "mixtral" > models.txt
+    echo "mistral" > models.txt
 fi

 # Read the models from the file
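Note on the Python change: `subprocess.run` blocks until the child process exits, so launching `ollama serve` this way would hang the caller; `subprocess.Popen` returns immediately with a handle to the running process. A minimal sketch of the resulting launch pattern is below (the `start_ollama` helper name is illustrative, not part of the patch; the script path matches the diff):

```python
import platform
import subprocess

def start_ollama(script: str = "~/run_ollama.sh") -> subprocess.Popen:
    """Launch the Ollama server script without blocking the caller.

    Popen returns as soon as the child is spawned, whereas run() would
    wait for `ollama serve` (a long-running server) to terminate.
    """
    if platform.system() == "Windows":
        # On Windows the script is executed inside WSL.
        return subprocess.Popen(["wsl", "bash", script])
    else:
        return subprocess.Popen(["bash", script])
```

On the shell side, prefixing `ollama serve` with `OLLAMA_MODELS=~/ollama/models` points the server at that directory for model storage, and the default entry seeded into `models.txt` changes from "mixtral" to "mistral".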