example(functions): Add OpenAI functions example (#767)
parent 3c6b798522 · commit 8379cce209
@@ -64,6 +64,14 @@ A ready to use example to show e2e how to integrate LocalAI with langchain

 [Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/langchain-python/)

+### LocalAI functions
+
+_by [@mudler](https://github.com/mudler)_
+
+A ready-to-use example showing how to use OpenAI functions with LocalAI
+
+[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/functions/)
+
 ### LocalAI WebUI

 _by [@dhruvgera](https://github.com/dhruvgera)_
examples/functions/.env (new file)
@@ -0,0 +1,9 @@
OPENAI_API_KEY=sk---anystringhere
OPENAI_API_BASE=http://api:8080/v1

# Models to preload at start
# Here we configure openllama as gpt-3.5-turbo
PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/openllama-7b-open-instruct.yaml", "name": "gpt-3.5-turbo"}]

## Change the default number of threads
#THREADS=14
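`PRELOAD_MODELS` is consumed by LocalAI at startup: each entry is installed from the [model-gallery](https://github.com/go-skynet/model-gallery) and registered under the given `name`. A minimal sketch of the equivalent manual step, assuming LocalAI's model-gallery `/models/apply` endpoint and the `8080` port mapping from the compose file below:

```python
# Hedged sketch: apply one model-gallery entry by hand instead of PRELOAD_MODELS.
# Assumes a LocalAI instance reachable on localhost:8080 that exposes /models/apply.
import requests

entry = {
    "url": "github:go-skynet/model-gallery/openllama-7b-open-instruct.yaml",
    "name": "gpt-3.5-turbo",  # the alias the client code will request
}
resp = requests.post("http://localhost:8080/models/apply", json=entry)
resp.raise_for_status()
print(resp.json())  # returns a job descriptor you can poll while the model downloads
```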
examples/functions/Dockerfile (new file)
@@ -0,0 +1,5 @@
FROM python:3.10-bullseye
COPY . /app
WORKDIR /app
RUN pip install --no-cache-dir -r requirements.txt
ENTRYPOINT [ "python", "./functions-openai.py" ]
examples/functions/README.md (new file)
@@ -0,0 +1,18 @@
# LocalAI functions

Example of using LocalAI functions; see the [OpenAI blog post](https://openai.com/blog/function-calling-and-other-api-updates) for background.

## Run

```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI

cd LocalAI/examples/functions

docker-compose run --rm functions
```

Note: the example automatically downloads the `openllama` model, as it is under a permissive license.

See the `.env` configuration file to set a different model with the [model-gallery](https://github.com/go-skynet/model-gallery) by editing `PRELOAD_MODELS`.
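The script can also be pointed at an already-running LocalAI instance: the pre-1.0 `openai` SDK reads `OPENAI_API_KEY` and `OPENAI_API_BASE` from the environment (which is exactly what the `.env` file provides inside the container), or they can be set in code. A minimal sketch, assuming LocalAI is listening on `localhost:8080`:

```python
# Hedged sketch: run functions-openai.py outside docker-compose by
# pointing the pre-1.0 openai SDK at a local LocalAI instance.
import openai

openai.api_key = "sk---anystringhere"         # LocalAI does not validate the key's value
openai.api_base = "http://localhost:8080/v1"  # replaces http://api:8080/v1 used inside compose
```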
examples/functions/docker-compose.yaml (new file)
@@ -0,0 +1,23 @@
version: "3.9"
services:
  api:
    image: quay.io/go-skynet/local-ai:master
    ports:
      - 8080:8080
    env_file:
      - .env
    environment:
      - DEBUG=true
      - MODELS_PATH=/models
    volumes:
      - ./models:/models:cached
    command: ["/usr/bin/local-ai"]
  functions:
    build:
      context: .
      dockerfile: Dockerfile
    depends_on:
      api:
        # service_healthy requires a healthcheck on `api`; the local-ai
        # image is expected to ship one, otherwise define it here
        condition: service_healthy
    env_file:
      - .env
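The `condition: service_healthy` gate is what keeps the client container from starting before the model server answers. A sketch of the same wait implemented client-side, assuming LocalAI's `/readyz` readiness path (swap in whichever endpoint your image actually exposes):

```python
# Hedged sketch: poll LocalAI until it is ready, mirroring the compose gate.
import time
import requests

def wait_for_api(base_url="http://localhost:8080", timeout=300.0):
    """Block until LocalAI answers its readiness endpoint, or raise."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            # /readyz is an assumption; adjust to your LocalAI build
            if requests.get(f"{base_url}/readyz", timeout=5).ok:
                return
        except requests.exceptions.ConnectionError:
            pass  # container not accepting connections yet
        time.sleep(2)
    raise TimeoutError("LocalAI did not become ready in time")
```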
examples/functions/functions-openai.py (new file)
@@ -0,0 +1,76 @@
import openai
import json


# Example dummy function hard coded to return the same weather
# In production, this could be your backend API or an external API
def get_current_weather(location, unit="fahrenheit"):
    """Get the current weather in a given location"""
    weather_info = {
        "location": location,
        "temperature": "72",
        "unit": unit,
        "forecast": ["sunny", "windy"],
    }
    return json.dumps(weather_info)


def run_conversation():
    # Step 1: send the conversation and available functions to GPT
    messages = [{"role": "user", "content": "What's the weather like in Boston?"}]
    functions = [
        {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location"],
            },
        }
    ]
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
        functions=functions,
        function_call="auto",  # auto is default, but we'll be explicit
    )
    response_message = response["choices"][0]["message"]

    # Step 2: check if GPT wanted to call a function
    if response_message.get("function_call"):
        # Step 3: call the function
        # Note: the JSON response may not always be valid; be sure to handle errors
        available_functions = {
            "get_current_weather": get_current_weather,
        }  # only one function in this example, but you can have multiple
        function_name = response_message["function_call"]["name"]
        function_to_call = available_functions[function_name]
        function_args = json.loads(response_message["function_call"]["arguments"])
        function_response = function_to_call(
            location=function_args.get("location"),
            unit=function_args.get("unit"),
        )

        # Step 4: send the info on the function call and function response to GPT
        messages.append(response_message)  # extend conversation with assistant's reply
        messages.append(
            {
                "role": "function",
                "name": function_name,
                "content": function_response,
            }
        )  # extend conversation with function response
        second_response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=messages,
        )  # get a new response from GPT where it can see the function response
        return second_response


print(run_conversation())
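The script's Step 3 comment warns that model-generated arguments "may not always be valid" JSON, but the example calls `json.loads` bare. A sketch of the defensive parsing it alludes to (the fallback policy here is an assumption, not part of the example):

```python
# Hedged sketch: parse function-call arguments without crashing on bad JSON.
import json

def parse_function_args(raw_arguments):
    """Return the arguments dict, or None if the model emitted invalid JSON."""
    try:
        args = json.loads(raw_arguments)
    except json.JSONDecodeError:
        return None  # caller can re-prompt the model or fall back to defaults
    return args if isinstance(args, dict) else None
```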
examples/functions/requirements.txt (new file)
@@ -0,0 +1,2 @@
langchain==0.0.234
openai==0.27.8
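Note: the script relies on the pre-1.0 `openai.ChatCompletion` interface, which was removed in `openai` 1.0, so the `openai==0.27.8` pin is load-bearing. `langchain` is pinned here but not imported by `functions-openai.py` itself.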