Mirror of https://github.com/mudler/LocalAI.git
Minor fixes (#285)
commit 3f739575d8 (parent 7e4616646f)
.github/workflows/bump_deps.yaml (vendored): 5 changed lines
```diff
@@ -9,9 +9,6 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - repository: "go-skynet/go-gpt4all-j.cpp"
-            variable: "GOGPT4ALLJ_VERSION"
-            branch: "master"
           - repository: "go-skynet/go-llama.cpp"
             variable: "GOLLAMA_VERSION"
             branch: "master"
```
```diff
@@ -30,7 +27,7 @@ jobs:
           - repository: "go-skynet/bloomz.cpp"
             variable: "BLOOMZ_VERSION"
             branch: "main"
-          - repository: "go-skynet/gpt4all"
+          - repository: "nomic-ai/gpt4all"
             variable: "GPT4ALL_VERSION"
             branch: "main"
     runs-on: ubuntu-latest
```
```diff
@@ -696,6 +696,8 @@ curl http://localhost:8080/v1/models
 
 ### Embeddings
 
+OpenAI docs: https://platform.openai.com/docs/api-reference/embeddings
+
 <details>
 
 The embedding endpoint is experimental and enabled only if the model is configured with `embeddings: true` in its `yaml` file, for example:
```
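The docs link added above points at the OpenAI embeddings API, which this endpoint mirrors. As a usage illustration only (not part of this commit), a minimal Go client sketch, assuming a local instance on port 8080 and a model configured with `embeddings: true` as the README describes; the model name is a placeholder:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// embeddingRequest mirrors the OpenAI-style request body.
type embeddingRequest struct {
	Model string `json:"model"`
	Input string `json:"input"`
}

// embeddingResponse keeps only the field read below.
type embeddingResponse struct {
	Data []struct {
		Embedding []float64 `json:"embedding"`
	} `json:"data"`
}

func main() {
	// Placeholder model name: any model whose yaml sets embeddings: true.
	body, _ := json.Marshal(embeddingRequest{Model: "bert-embeddings", Input: "The food was delicious"})
	resp, err := http.Post("http://localhost:8080/v1/embeddings", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out embeddingResponse
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	if len(out.Data) > 0 {
		fmt.Println("embedding dimensions:", len(out.Data[0].Embedding))
	}
}
```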
```diff
@@ -742,6 +744,8 @@ curl http://localhost:8080/v1/audio/transcriptions -H "Content-Type: multipart/f
 
 ### Image generation
 
+OpenAI docs: https://platform.openai.com/docs/api-reference/images/create
+
 LocalAI supports generating images with Stable diffusion, running on CPU.
 
 | mode=0 | mode=1 (winograd/sgemm) |
```
````diff
@@ -773,6 +777,8 @@ curl http://localhost:8080/v1/images/generations -H "Content-Type: application/j
 }'
 ```
 
+Note: image generator supports images up to 512x512. You can use other tools however to upscale the image, for instance: https://github.com/upscayl/upscayl.
+
 #### Setup
 
 Note: In order to use the `images/generation` endpoint, you need to build LocalAI with `GO_TAGS=stablediffusion`.
````
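For illustration, the same request as a small Go client. This is a sketch, not part of the commit: the field names follow the OpenAI images API, the `positive|negative` prompt split matches the endpoint code further down, and `n` is the per-prompt image count whose handling is fixed below:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

// imageRequest uses OpenAI-style fields, as in the README's curl example.
type imageRequest struct {
	Prompt string `json:"prompt"` // "positive|negative" syntax, split by the endpoint
	Size   string `json:"size"`   // up to 512x512 per the note above
	N      int    `json:"n,omitempty"`
}

func main() {
	body, _ := json.Marshal(imageRequest{
		Prompt: "floating hair, portrait|low quality",
		Size:   "512x512",
		N:      2, // omit (0) to get the server-side default of one image
	})
	resp, err := http.Post("http://localhost:8080/v1/images/generations", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	raw, _ := io.ReadAll(resp.Body)
	fmt.Println(string(raw)) // prints the JSON response describing the generated images
}
```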
```diff
@@ -847,7 +853,7 @@ Yes! If the client uses OpenAI and supports setting a different base URL to send
 
 <details>
 
-Not currently, as ggml doesn't support GPUs yet: https://github.com/ggerganov/llama.cpp/discussions/915.
+There is partial GPU support, see build instructions above.
 
 </details>
 
```
```diff
@@ -289,12 +289,14 @@ func chatEndpoint(cm ConfigMerger, debug bool, loader *model.ModelLoader, thread
 
 	mess := []string{}
 	for _, i := range input.Messages {
+		var content string
 		r := config.Roles[i.Role]
-		if r == "" {
-			r = i.Role
+		if r != "" {
+			content = fmt.Sprint(r, " ", i.Content)
+		} else {
+			content = i.Content
 		}
 
-		content := fmt.Sprint(r, " ", i.Content)
 		mess = append(mess, content)
 	}
 
```
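The change above inverts the role check: previously an unmapped role fell back to the raw role name and was always prefixed to the message; now only configured roles are prefixed and other content passes through untouched. A self-contained sketch of the new behavior (the roles map is illustrative):

```go
package main

import "fmt"

// buildMessage mirrors the new logic: prefix the content only when the
// role has a configured mapping; otherwise pass the content through as-is.
func buildMessage(roles map[string]string, role, content string) string {
	if r := roles[role]; r != "" {
		return fmt.Sprint(r, " ", content)
	}
	return content
}

func main() {
	roles := map[string]string{"user": "User:", "assistant": "Assistant:"} // illustrative mapping
	fmt.Println(buildMessage(roles, "user", "hello"))   // "User: hello"
	fmt.Println(buildMessage(roles, "system", "hello")) // "hello" (previously "system hello")
}
```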
```diff
@@ -491,6 +493,11 @@ func imageEndpoint(cm ConfigMerger, debug bool, loader *model.ModelLoader, image
 
 	var result []Item
 	for _, i := range config.PromptStrings {
+		n := input.N
+		if input.N == 0 {
+			n = 1
+		}
+		for j := 0; j < n; j++ {
 		prompts := strings.Split(i, "|")
 		positive_prompt := prompts[0]
 		negative_prompt := ""
```
```diff
@@ -552,6 +559,7 @@ func imageEndpoint(cm ConfigMerger, debug bool, loader *model.ModelLoader, image
 
 		result = append(result, *item)
 	}
+	}
 
 	resp := &OpenAIResponse{
 		Data: result,
```
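Taken together, the two hunks above make the endpoint honor the request's `n` field: each prompt is rendered `n` times, defaulting to one when the client omits it. A minimal sketch of the resulting control flow, with item generation stubbed out:

```go
package main

import "fmt"

// generateAll mirrors the new loop structure: for every prompt, produce
// n results, where n falls back to 1 if the request didn't set it.
func generateAll(prompts []string, inputN int) []string {
	var result []string
	for _, p := range prompts {
		n := inputN
		if inputN == 0 {
			n = 1 // default when the client omits "n"
		}
		for j := 0; j < n; j++ {
			result = append(result, fmt.Sprintf("%s #%d", p, j))
		}
	}
	return result
}

func main() {
	fmt.Println(generateAll([]string{"a cute sea otter"}, 0)) // one item per prompt
	fmt.Println(generateAll([]string{"a cute sea otter"}, 3)) // three items per prompt
}
```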