From 2f3c3b18676422c93ba142919ece7b495d8238a2 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Tue, 30 May 2023 18:34:43 +0200 Subject: [PATCH] examples: keep old example around (#439) --- examples/README.md | 2 + examples/chatbot-ui-manual/README.md | 48 +++++++++++++++++++ .../chatbot-ui-manual/docker-compose.yaml | 24 ++++++++++ .../chatbot-ui-manual/models/completion.tmpl | 1 + .../models/gpt-3.5-turbo.yaml | 16 +++++++ .../chatbot-ui-manual/models/gpt4all.tmpl | 4 ++ 6 files changed, 95 insertions(+) create mode 100644 examples/chatbot-ui-manual/README.md create mode 100644 examples/chatbot-ui-manual/docker-compose.yaml create mode 100644 examples/chatbot-ui-manual/models/completion.tmpl create mode 100644 examples/chatbot-ui-manual/models/gpt-3.5-turbo.yaml create mode 100644 examples/chatbot-ui-manual/models/gpt4all.tmpl diff --git a/examples/README.md b/examples/README.md index e3257cdb..2285ed19 100644 --- a/examples/README.md +++ b/examples/README.md @@ -22,6 +22,8 @@ This integration shows how to use LocalAI with [mckaywrigley/chatbot-ui](https:/ [Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui/) +There is also a separate example to show how to manually setup a model: [example](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui-manual/) + ### Flowise _by [@mudler](https://github.com/mudler)_ diff --git a/examples/chatbot-ui-manual/README.md b/examples/chatbot-ui-manual/README.md new file mode 100644 index 00000000..7cf4bbb2 --- /dev/null +++ b/examples/chatbot-ui-manual/README.md @@ -0,0 +1,48 @@ +# chatbot-ui + +Example of integration with [mckaywrigley/chatbot-ui](https://github.com/mckaywrigley/chatbot-ui). 
+
+![Screenshot from 2023-04-26 23-59-55](https://user-images.githubusercontent.com/2420543/234715439-98d12e03-d3ce-4f94-ab54-2b256808e05e.png)
+
+## Setup
+
+```bash
+# Clone LocalAI
+git clone https://github.com/go-skynet/LocalAI
+
+cd LocalAI/examples/chatbot-ui-manual
+
+# (optional) Checkout a specific LocalAI tag
+# git checkout -b build
+
+# Download gpt4all-j to models/
+wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j
+
+# start with docker-compose
+docker-compose up -d --pull always
+# or you can build the images with:
+# docker-compose up -d --build
+```
+
+## Pointing chatbot-ui to a separately managed LocalAI service
+
+If you want to use the [chatbot-ui example](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui) with an externally managed LocalAI service, you can alter the `docker-compose` file so that it looks like the below. You will notice the file is smaller, because we have removed the section that would normally start the LocalAI service. Take care to update the IP address (or FQDN) that the chatbot-ui service tries to access (marked `<>` below):
+```
+version: '3.6'
+
+services:
+  chatgpt:
+    image: ghcr.io/mckaywrigley/chatbot-ui:main
+    ports:
+      - 3000:3000
+    environment:
+      - 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX'
+      - 'OPENAI_API_HOST=http://<>:8080'
+```
+
+Once you've edited the docker-compose file, you can start it with `docker compose up`, then browse to `http://localhost:3000`.
+
+## Accessing chatbot-ui
+
+Open http://localhost:3000 for the Web UI. 
+ diff --git a/examples/chatbot-ui-manual/docker-compose.yaml b/examples/chatbot-ui-manual/docker-compose.yaml new file mode 100644 index 00000000..c7782c34 --- /dev/null +++ b/examples/chatbot-ui-manual/docker-compose.yaml @@ -0,0 +1,24 @@ +version: '3.6' + +services: + api: + image: quay.io/go-skynet/local-ai:latest + build: + context: ../../ + dockerfile: Dockerfile + ports: + - 8080:8080 + environment: + - DEBUG=true + - MODELS_PATH=/models + volumes: + - ./models:/models:cached + command: ["/usr/bin/local-ai" ] + + chatgpt: + image: ghcr.io/mckaywrigley/chatbot-ui:main + ports: + - 3000:3000 + environment: + - 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX' + - 'OPENAI_API_HOST=http://api:8080' \ No newline at end of file diff --git a/examples/chatbot-ui-manual/models/completion.tmpl b/examples/chatbot-ui-manual/models/completion.tmpl new file mode 100644 index 00000000..9867cfcd --- /dev/null +++ b/examples/chatbot-ui-manual/models/completion.tmpl @@ -0,0 +1 @@ +{{.Input}} \ No newline at end of file diff --git a/examples/chatbot-ui-manual/models/gpt-3.5-turbo.yaml b/examples/chatbot-ui-manual/models/gpt-3.5-turbo.yaml new file mode 100644 index 00000000..5c192f5d --- /dev/null +++ b/examples/chatbot-ui-manual/models/gpt-3.5-turbo.yaml @@ -0,0 +1,16 @@ +name: gpt-3.5-turbo +parameters: + model: ggml-gpt4all-j + top_k: 80 + temperature: 0.2 + top_p: 0.7 +context_size: 1024 +stopwords: +- "HUMAN:" +- "GPT:" +roles: + user: " " + system: " " +template: + completion: completion + chat: gpt4all \ No newline at end of file diff --git a/examples/chatbot-ui-manual/models/gpt4all.tmpl b/examples/chatbot-ui-manual/models/gpt4all.tmpl new file mode 100644 index 00000000..f76b080a --- /dev/null +++ b/examples/chatbot-ui-manual/models/gpt4all.tmpl @@ -0,0 +1,4 @@ +The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response. +### Prompt: +{{.Input}} +### Response: