Mirror of https://github.com/mudler/LocalAI.git

Merge branch 'master' into cleanup_deps

Commit 2a03905920
@@ -1,2 +1,3 @@
git+https://github.com/huggingface/parler-tts.git@8e465f1b5fcd223478e07175cb40494d19ffbe17
llvmlite==0.43.0
numba==0.60.0

@@ -6,11 +6,7 @@
    rel="stylesheet"
    href="/static/assets/highlightjs.css"
  />
  <script defer src="/static/assets/anime.min.js"></script>
  <script
    defer
    src="/static/assets/highlightjs.js"
  ></script>
  <script defer src="/static/assets/highlightjs.js"></script>
  <script
    defer
    src="/static/assets/alpine.js"

@@ -130,8 +130,10 @@ There are options that can be tweaked or parameters that can be set using environment variables

| Environment Variable | Description |
|----------------------|-------------|
| **LOCALAI_P2P_DISABLE_DHT** | Set to "true" to disable DHT and make the p2p layer local only (mDNS) |
| **LOCALAI_P2P_DISABLE_LIMITS** | Set to "true" to disable connection limits and resource management |
| **LOCALAI_P2P_ENABLE_LIMITS** | Set to "true" to enable connection limits and resource management (useful when running with poor connectivity or when you want to limit resource consumption) |
| **LOCALAI_P2P_TOKEN** | Set the token for the p2p network |
| **LOCALAI_P2P_LOGLEVEL** | Set the loglevel for the LocalAI p2p stack (default: info) |
| **LOCALAI_LIBP2P_LOGLEVEL** | Set the loglevel for the underlying libp2p stack (default: fatal) |
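
As an illustration of how these variables are combined, here is a minimal sketch; the `local-ai run` entrypoint and the token value are assumptions, not part of this change:

```bash
# Minimal sketch: start LocalAI with local-only (mDNS) discovery and verbose p2p logging.
# "local-ai run" and the token value are assumptions; substitute your own binary and token.
LOCALAI_P2P_TOKEN="<your-p2p-token>" \
LOCALAI_P2P_DISABLE_DHT=true \
LOCALAI_P2P_LOGLEVEL=debug \
local-ai run
```
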
## Architecture
@@ -40,43 +40,121 @@ parameters:

To use the functions with the OpenAI client in python:

```python
import openai
from openai import OpenAI

# ...
# Send the conversation and available functions to GPT
messages = [{"role": "user", "content": "What's the weather like in Boston?"}]
functions = [
messages = [{"role": "user", "content": "What is the weather like in Beijing now?"}]
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "description": "Return the temperature of the region specified by the user",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                        "description": "User specified region",
                    },
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"],
                        "description": "temperature unit"
                    },
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location"],
            },
        },
    }
]
response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=messages,
    functions=functions,
    function_call="auto",

client = OpenAI(
    # This is the default and can be omitted
    api_key="test",
    base_url="http://localhost:8080/v1/"
)
# ...

response = client.chat.completions.create(
    messages=messages,
    tools=tools,
    tool_choice="auto",
    model="gpt-4",
)
# ...
```

{{% alert note %}}
When running the python script, be sure to:

- Set the `OPENAI_API_KEY` environment variable to a random string (the OpenAI API key is NOT required!)
- Set `OPENAI_API_BASE` to point to your LocalAI service, for example `OPENAI_API_BASE=http://localhost:8080`

For example, with curl:

```bash
curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
  "model": "gpt-4",
  "messages": [{"role": "user", "content": "What is the weather like in Beijing now?"}],
  "tools": [
    {
      "type": "function",
      "function": {
        "name": "get_current_weather",
        "description": "Return the temperature of the region specified by the user",
        "parameters": {
          "type": "object",
          "properties": {
            "location": {
              "type": "string",
              "description": "User specified region"
            },
            "unit": {
              "type": "string",
              "enum": ["celsius", "fahrenheit"],
              "description": "temperature unit"
            }
          },
          "required": ["location"]
        }
      }
    }
  ],
  "tool_choice": "auto"
}'
```

{{% /alert %}}

Return data:

```json
{
  "created": 1724210813,
  "object": "chat.completion",
  "id": "16b57014-477c-4e6b-8d25-aad028a5625e",
  "model": "gpt-4",
  "choices": [
    {
      "index": 0,
      "finish_reason": "tool_calls",
      "message": {
        "role": "assistant",
        "content": "",
        "tool_calls": [
          {
            "index": 0,
            "id": "16b57014-477c-4e6b-8d25-aad028a5625e",
            "type": "function",
            "function": {
              "name": "get_current_weather",
              "arguments": "{\"location\":\"Beijing\",\"unit\":\"celsius\"}"
            }
          }
        ]
      }
    }
  ],
  "usage": {
    "prompt_tokens": 221,
    "completion_tokens": 26,
    "total_tokens": 247
  }
}
```
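
To act on a response like the one above you need to pull out the tool call. The following is a small sketch, assuming `jq` is installed and that the request body shown earlier has been saved to a hypothetical `request.json` file:

```bash
# Sketch (assumes jq is installed and the request body above is saved as request.json).
# Extract the function name and its JSON-encoded arguments from the tool call.
curl -s http://localhost:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d @request.json \
  | jq -r '.choices[0].message.tool_calls[0].function | .name, .arguments'
# get_current_weather
# {"location":"Beijing","unit":"celsius"}
```
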
## Advanced
@@ -57,6 +57,3 @@
- filename: "KFOlCnqEu92Fr1MmYUtfBBc9.ttf"
  url: "https://fonts.gstatic.com/s/roboto/v30/KFOlCnqEu92Fr1MmYUtfBBc9.ttf"
  sha: "361a50f8a6c816ba4306c5290b7e487a726e1b4dcc3d8d7e4acf1fc2dae9f551"
- filename: "anime.js"
  url: "https://raw.githubusercontent.com/juliangarnier/anime/master/lib/anime.min.js"
  sha: "bceef94f964481f7680d95e7fbbe5a8c20d3945a926a754874898a578db7c7ab"
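
Each asset entry pairs a download URL with a sha256 checksum. To sanity-check such an entry locally, a minimal sketch (assuming `curl` and `sha256sum` are available) is:

```bash
# Sketch: recompute the checksum of a listed asset and compare it to the recorded sha.
curl -sL "https://fonts.gstatic.com/s/roboto/v30/KFOlCnqEu92Fr1MmYUtfBBc9.ttf" | sha256sum
# expected: 361a50f8a6c816ba4306c5290b7e487a726e1b4dcc3d8d7e4acf1fc2dae9f551
```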