diff --git a/examples/bruno/LocalAI Test Requests/Sound Generation/musicgen.bru b/.bruno/LocalAI Test Requests/Sound Generation/musicgen.bru
similarity index 100%
rename from examples/bruno/LocalAI Test Requests/Sound Generation/musicgen.bru
rename to .bruno/LocalAI Test Requests/Sound Generation/musicgen.bru
diff --git a/examples/bruno/LocalAI Test Requests/backend monitor/backend monitor.bru b/.bruno/LocalAI Test Requests/backend monitor/backend monitor.bru
similarity index 100%
rename from examples/bruno/LocalAI Test Requests/backend monitor/backend monitor.bru
rename to .bruno/LocalAI Test Requests/backend monitor/backend monitor.bru
diff --git a/examples/bruno/LocalAI Test Requests/backend monitor/backend-shutdown.bru b/.bruno/LocalAI Test Requests/backend monitor/backend-shutdown.bru
similarity index 100%
rename from examples/bruno/LocalAI Test Requests/backend monitor/backend-shutdown.bru
rename to .bruno/LocalAI Test Requests/backend monitor/backend-shutdown.bru
diff --git a/examples/bruno/LocalAI Test Requests/bruno.json b/.bruno/LocalAI Test Requests/bruno.json
similarity index 100%
rename from examples/bruno/LocalAI Test Requests/bruno.json
rename to .bruno/LocalAI Test Requests/bruno.json
diff --git a/examples/bruno/LocalAI Test Requests/environments/localhost.bru b/.bruno/LocalAI Test Requests/environments/localhost.bru
similarity index 100%
rename from examples/bruno/LocalAI Test Requests/environments/localhost.bru
rename to .bruno/LocalAI Test Requests/environments/localhost.bru
diff --git a/examples/bruno/LocalAI Test Requests/get models list.bru b/.bruno/LocalAI Test Requests/get models list.bru
similarity index 100%
rename from examples/bruno/LocalAI Test Requests/get models list.bru
rename to .bruno/LocalAI Test Requests/get models list.bru
diff --git a/examples/bruno/LocalAI Test Requests/image generation/Generate image.bru b/.bruno/LocalAI Test Requests/image generation/Generate image.bru
similarity index 100%
rename from examples/bruno/LocalAI Test Requests/image generation/Generate image.bru
rename to .bruno/LocalAI Test Requests/image generation/Generate image.bru
diff --git a/examples/bruno/LocalAI Test Requests/llm text/-completions.bru b/.bruno/LocalAI Test Requests/llm text/-completions.bru
similarity index 100%
rename from examples/bruno/LocalAI Test Requests/llm text/-completions.bru
rename to .bruno/LocalAI Test Requests/llm text/-completions.bru
diff --git a/examples/bruno/LocalAI Test Requests/llm text/-edits.bru b/.bruno/LocalAI Test Requests/llm text/-edits.bru
similarity index 100%
rename from examples/bruno/LocalAI Test Requests/llm text/-edits.bru
rename to .bruno/LocalAI Test Requests/llm text/-edits.bru
diff --git a/examples/bruno/LocalAI Test Requests/llm text/-embeddings.bru b/.bruno/LocalAI Test Requests/llm text/-embeddings.bru
similarity index 100%
rename from examples/bruno/LocalAI Test Requests/llm text/-embeddings.bru
rename to .bruno/LocalAI Test Requests/llm text/-embeddings.bru
diff --git a/examples/bruno/LocalAI Test Requests/llm text/chat/chat completion -simple- 1 message-.bru b/.bruno/LocalAI Test Requests/llm text/chat/chat completion -simple- 1 message-.bru
similarity index 100%
rename from examples/bruno/LocalAI Test Requests/llm text/chat/chat completion -simple- 1 message-.bru
rename to .bruno/LocalAI Test Requests/llm text/chat/chat completion -simple- 1 message-.bru
diff --git a/examples/bruno/LocalAI Test Requests/llm text/chat/chat-completions -long-.bru b/.bruno/LocalAI Test Requests/llm text/chat/chat-completions -long-.bru
similarity index 100%
rename from examples/bruno/LocalAI Test Requests/llm text/chat/chat-completions -long-.bru
rename to .bruno/LocalAI Test Requests/llm text/chat/chat-completions -long-.bru
diff --git a/examples/bruno/LocalAI Test Requests/llm text/chat/chat-completions -stream-.bru b/.bruno/LocalAI Test Requests/llm text/chat/chat-completions -stream-.bru
similarity index 100%
rename from examples/bruno/LocalAI Test Requests/llm text/chat/chat-completions -stream-.bru
rename to .bruno/LocalAI Test Requests/llm text/chat/chat-completions -stream-.bru
diff --git a/examples/bruno/LocalAI Test Requests/model gallery/add model gallery.bru b/.bruno/LocalAI Test Requests/model gallery/add model gallery.bru
similarity index 100%
rename from examples/bruno/LocalAI Test Requests/model gallery/add model gallery.bru
rename to .bruno/LocalAI Test Requests/model gallery/add model gallery.bru
diff --git a/examples/bruno/LocalAI Test Requests/model gallery/delete model gallery.bru b/.bruno/LocalAI Test Requests/model gallery/delete model gallery.bru
similarity index 100%
rename from examples/bruno/LocalAI Test Requests/model gallery/delete model gallery.bru
rename to .bruno/LocalAI Test Requests/model gallery/delete model gallery.bru
diff --git a/examples/bruno/LocalAI Test Requests/model gallery/list MODELS in galleries.bru b/.bruno/LocalAI Test Requests/model gallery/list MODELS in galleries.bru
similarity index 100%
rename from examples/bruno/LocalAI Test Requests/model gallery/list MODELS in galleries.bru
rename to .bruno/LocalAI Test Requests/model gallery/list MODELS in galleries.bru
diff --git a/examples/bruno/LocalAI Test Requests/model gallery/list model GALLERIES.bru b/.bruno/LocalAI Test Requests/model gallery/list model GALLERIES.bru
similarity index 100%
rename from examples/bruno/LocalAI Test Requests/model gallery/list model GALLERIES.bru
rename to .bruno/LocalAI Test Requests/model gallery/list model GALLERIES.bru
diff --git a/.bruno/LocalAI Test Requests/model gallery/model delete.bru b/.bruno/LocalAI Test Requests/model gallery/model delete.bru
new file mode 100644
index 00000000..b320dae3
--- /dev/null
+++ b/.bruno/LocalAI Test Requests/model gallery/model delete.bru
@@ -0,0 +1,11 @@
+meta {
+  name: model delete
+  type: http
+  seq: 7
+}
+
+post {
+  url: {{PROTOCOL}}{{HOST}}:{{PORT}}/models/galleries
+  body: none
+  auth: none
+}
diff --git a/examples/bruno/LocalAI Test Requests/model gallery/model gallery apply -gist-.bru b/.bruno/LocalAI Test Requests/model gallery/model gallery apply -gist-.bru
similarity index 100%
rename from examples/bruno/LocalAI Test Requests/model gallery/model gallery apply -gist-.bru
rename to .bruno/LocalAI Test Requests/model gallery/model gallery apply -gist-.bru
diff --git a/examples/bruno/LocalAI Test Requests/model gallery/model gallery apply.bru b/.bruno/LocalAI Test Requests/model gallery/model gallery apply.bru
similarity index 100%
rename from examples/bruno/LocalAI Test Requests/model gallery/model gallery apply.bru
rename to .bruno/LocalAI Test Requests/model gallery/model gallery apply.bru
diff --git a/.bruno/LocalAI Test Requests/transcription/gb1.ogg b/.bruno/LocalAI Test Requests/transcription/gb1.ogg
new file mode 100644
index 00000000..df22d636
Binary files /dev/null and b/.bruno/LocalAI Test Requests/transcription/gb1.ogg differ
diff --git a/.bruno/LocalAI Test Requests/transcription/transcribe.bru b/.bruno/LocalAI Test Requests/transcription/transcribe.bru
new file mode 100644
index 00000000..831aad90
--- /dev/null
+++ b/.bruno/LocalAI Test Requests/transcription/transcribe.bru
@@ -0,0 +1,16 @@
+meta {
+  name: transcribe
+  type: http
+  seq: 1
+}
+
+post {
+  url: {{PROTOCOL}}{{HOST}}:{{PORT}}/v1/audio/transcriptions
+  body: multipartForm
+  auth: none
+}
+
+body:multipart-form {
+  file: @file(transcription/gb1.ogg)
+  model: whisper-1
+}
diff --git a/examples/bruno/LocalAI Test Requests/tts/-tts.bru b/.bruno/LocalAI Test Requests/tts/-tts.bru
similarity index 100%
rename from examples/bruno/LocalAI Test Requests/tts/-tts.bru
rename to .bruno/LocalAI Test Requests/tts/-tts.bru
diff --git a/examples/bruno/LocalAI Test Requests/tts/musicgen.bru b/.bruno/LocalAI Test Requests/tts/musicgen.bru
similarity index 100%
rename from examples/bruno/LocalAI Test Requests/tts/musicgen.bru
rename to .bruno/LocalAI Test Requests/tts/musicgen.bru
diff --git a/README.md b/README.md
index 0b2eee92..cb949f61 100644
--- a/README.md
+++ b/README.md
@@ -85,6 +85,7 @@ local-ai run oci://localai/phi-2:latest
 
 ## 📰 Latest project news
 
+- Oct 2024: examples moved to [LocalAI-examples](https://github.com/mudler/LocalAI-examples)
 - Aug 2024: 🆕 FLUX-1, [P2P Explorer](https://explorer.localai.io)
 - July 2024: 🔥🔥 🆕 P2P Dashboard, LocalAI Federated mode and AI Swarms: https://github.com/mudler/LocalAI/pull/2723
 - June 2024: 🆕 You can browse now the model gallery without LocalAI! Check out https://models.localai.io
diff --git a/examples/README.md b/examples/README.md
deleted file mode 100644
index debfe1a5..00000000
--- a/examples/README.md
+++ /dev/null
@@ -1,190 +0,0 @@
-# Examples
-
-| [ChatGPT OSS alternative](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui) | [Image generation](https://localai.io/api-endpoints/index.html#image-generation) |
-|------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------|
-| ![Screenshot from 2023-04-26 23-59-55](https://user-images.githubusercontent.com/2420543/234715439-98d12e03-d3ce-4f94-ab54-2b256808e05e.png) | ![b6441997879](https://github.com/go-skynet/LocalAI/assets/2420543/d50af51c-51b7-4f39-b6c2-bf04c403894c) |
-
-| [Telegram bot](https://github.com/go-skynet/LocalAI/tree/master/examples/telegram-bot) | [Flowise](https://github.com/go-skynet/LocalAI/tree/master/examples/flowise) |
-|------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------|
-| ![Screenshot from 2023-06-09 00-36-26](https://github.com/go-skynet/LocalAI/assets/2420543/e98b4305-fa2d-41cf-9d2f-1bb2d75ca902) | ![Screenshot from 2023-05-30 18-01-03](https://github.com/go-skynet/LocalAI/assets/2420543/02458782-0549-4131-971c-95ee56ec1af8) |
-
-Here is a list of projects that can easily be integrated with the LocalAI backend.
-
-
-### Projects
-
-### AutoGPT
-
-_by [@mudler](https://github.com/mudler)_
-
-This example shows how to use AutoGPT with LocalAI.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/autoGPT/)
-
-### Chatbot-UI
-
-_by [@mkellerman](https://github.com/mkellerman)_
-
-![Screenshot from 2023-04-26 23-59-55](https://user-images.githubusercontent.com/2420543/234715439-98d12e03-d3ce-4f94-ab54-2b256808e05e.png)
-
-This integration shows how to use LocalAI with [mckaywrigley/chatbot-ui](https://github.com/mckaywrigley/chatbot-ui).
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui/)
-
-There is also a separate example showing how to manually set up a model: [example](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui-manual/)
-
-### K8sGPT
-
-_by [@mudler](https://github.com/mudler)_
-
-This example shows how to use LocalAI inside Kubernetes with [k8sgpt](https://k8sgpt.ai).
-
-![Screenshot from 2023-06-19 23-58-47](https://github.com/go-skynet/go-ggml-transformers.cpp/assets/2420543/cab87409-ee68-44ae-8d53-41627fb49509)
-
-### Fine-tuning a model and converting it to gguf to use with LocalAI
-
-_by [@mudler](https://github.com/mudler)_
-
-This is an end-to-end example of how to fine-tune a model with [axolotl](https://github.com/OpenAccess-AI-Collective/axolotl) and convert it to gguf to use with LocalAI.
-
-[Check it out here](https://github.com/mudler/LocalAI/tree/master/examples/e2e-fine-tuning/)
-
-### Flowise
-
-_by [@mudler](https://github.com/mudler)_
-
-This example shows how to use [FlowiseAI/Flowise](https://github.com/FlowiseAI/Flowise) with LocalAI.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/flowise/)
-
-### Discord bot
-
-_by [@mudler](https://github.com/mudler)_
-
-Run a Discord bot which lets you talk directly with a model.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/discord-bot/), or for a live demo you can talk with our bot in #random-bot in our Discord server.
-
-### Langchain
-
-_by [@dave-gray101](https://github.com/dave-gray101)_
-
-A ready-to-use example showing end to end how to integrate LocalAI with langchain.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/langchain/)
-
-### Langchain Python
-
-_by [@mudler](https://github.com/mudler)_
-
-A ready-to-use example showing end to end how to integrate LocalAI with langchain.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/langchain-python/)
-
-### LocalAI functions
-
-_by [@mudler](https://github.com/mudler)_
-
-A ready-to-use example showing how to use OpenAI functions with LocalAI.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/functions/)
-
-### LocalAI WebUI
-
-_by [@dhruvgera](https://github.com/dhruvgera)_
-
-![image](https://user-images.githubusercontent.com/42107491/235344183-44b5967d-ba22-4331-804c-8da7004a5d35.png)
-
-A light, community-maintained web interface for LocalAI.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/localai-webui/)
-
-### How to run rwkv models
-
-_by [@mudler](https://github.com/mudler)_
-
-A full example on how to run RWKV models with LocalAI.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/rwkv/)
-
-### PrivateGPT
-
-_by [@mudler](https://github.com/mudler)_
-
-A full example on how to run PrivateGPT with LocalAI.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/privateGPT/)
-
-### Slack bot
-
-_by [@mudler](https://github.com/mudler)_
-
-Run a Slack bot which lets you talk directly with a model.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/slack-bot/)
-
-### Slack bot (Question answering)
-
-_by [@mudler](https://github.com/mudler)_
-
-Run a Slack bot, ideally for teams, which lets you ask questions about a documentation website or a GitHub repository.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/slack-qa-bot/)
-
-### Question answering on documents with llama-index
-
-_by [@mudler](https://github.com/mudler)_
-
-Shows how to integrate with [Llama-Index](https://gpt-index.readthedocs.io/en/stable/getting_started/installation.html) to enable question answering on a set of documents.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/query_data/)
-
-### Question answering on documents with langchain and chroma
-
-_by [@mudler](https://github.com/mudler)_
-
-Shows how to integrate with `Langchain` and `Chroma` to enable question answering on a set of documents.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/langchain-chroma/)
-
-### Telegram bot
-
-_by [@mudler](https://github.com/mudler)_
-
-![Screenshot from 2023-06-09 00-36-26](https://github.com/go-skynet/LocalAI/assets/2420543/e98b4305-fa2d-41cf-9d2f-1bb2d75ca902)
-
-Use LocalAI to power a Telegram bot assistant, with image generation and audio support!
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/telegram-bot/)
-
-### Template for Runpod.io
-
-_by [@fHachenberg](https://github.com/fHachenberg)_
-
-Allows you to run any LocalAI-compatible model as a backend on the servers of https://runpod.io
-
-[Check it out here](https://runpod.io/gsc?template=uv9mtqnrd0&ref=984wlcra)
-
-### Continue
-
-_by [@gruberdev](https://github.com/gruberdev)_
-
-Screenshot
-
-Demonstrates how to integrate an open-source copilot alternative that enhances code analysis, completion, and improvements. This approach seamlessly integrates with any LocalAI model, offering a more user-friendly experience.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/continue/)
-
-### Streamlit bot
-
-_by [@majoshi1](https://github.com/majoshi1)_
-
-![Screenshot](streamlit-bot/streamlit-bot.png)
-
-A chat bot made using `Streamlit` & LocalAI.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/streamlit-bot/)
-
-## Want to contribute?
-
-Create an issue, and put `Example: ` in the title! We will post your examples here.
diff --git a/examples/autoGPT/.env.example b/examples/autoGPT/.env.example
deleted file mode 100644
index df5cc0c3..00000000
--- a/examples/autoGPT/.env.example
+++ /dev/null
@@ -1,9 +0,0 @@
-# CPU .env docs: https://localai.io/howtos/easy-setup-docker-cpu/
-# GPU .env docs: https://localai.io/howtos/easy-setup-docker-gpu/
-
-OPENAI_API_KEY=sk---anystringhere
-OPENAI_API_BASE=http://api:8080/v1
-# Models to preload at start
-# Here we configure gpt4all as gpt-3.5-turbo and bert as embeddings,
-# see other options in the model gallery at https://github.com/go-skynet/model-gallery
-PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}, { "url": "github:go-skynet/model-gallery/bert-embeddings.yaml", "name": "text-embedding-ada-002"}]
diff --git a/examples/autoGPT/README.md b/examples/autoGPT/README.md
deleted file mode 100644
index 32793606..00000000
--- a/examples/autoGPT/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# AutoGPT
-
-Example of integration with [AutoGPT](https://github.com/Significant-Gravitas/Auto-GPT).
-
-## Run
-
-```bash
-# Clone LocalAI
-git clone https://github.com/go-skynet/LocalAI
-
-cd LocalAI/examples/autoGPT
-
-cp -rfv .env.example .env
-
-# Edit the .env file to set a different model by editing `PRELOAD_MODELS`.
-vim .env
-
-docker-compose run --rm auto-gpt
-```
-
-Note: The example automatically downloads the `gpt4all` model as it is under a permissive license. The GPT4All model does not seem to be capable enough to run AutoGPT. WizardLM-7b-uncensored seems to perform better (with `f16: true`).
-
-
-## Without docker
-
-Run AutoGPT with `OPENAI_API_BASE` pointing to the LocalAI endpoint. For instance, if you run it locally:
-
-```
-OPENAI_API_BASE=http://localhost:8080 python ...
-```
-
-Note: you need models named `gpt-3.5-turbo` and `text-embedding-ada-002`. You can preload them in LocalAI at startup by setting the following in the env:
-
-```
-PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}, { "url": "github:go-skynet/model-gallery/bert-embeddings.yaml", "name": "text-embedding-ada-002"}]
-```
\ No newline at end of file
diff --git a/examples/autoGPT/docker-compose.yaml b/examples/autoGPT/docker-compose.yaml
deleted file mode 100644
index c3220ad1..00000000
--- a/examples/autoGPT/docker-compose.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-version: "3.9"
-services:
-  api:
-    image: quay.io/go-skynet/local-ai:latest
-    ports:
-      - 8080:8080
-    env_file:
-      - .env
-    environment:
-      - DEBUG=true
-      - MODELS_PATH=/models
-    volumes:
-      - ./models:/models:cached
-    command: ["/usr/bin/local-ai" ]
-  auto-gpt:
-    image: significantgravitas/auto-gpt
-    depends_on:
-      api:
-        condition: service_healthy
-      redis:
-        condition: service_started
-    env_file:
-      - .env
-    environment:
-      MEMORY_BACKEND: ${MEMORY_BACKEND:-redis}
-      REDIS_HOST: ${REDIS_HOST:-redis}
-    profiles: ["exclude-from-up"]
-    volumes:
-      - ./auto_gpt_workspace:/app/autogpt/auto_gpt_workspace
-      - ./data:/app/data
-      ## allow auto-gpt to write logs to disk
-      - ./logs:/app/logs
-      ## uncomment following lines if you want to make use of these files
-      ## you must have them existing in the same folder as this docker-compose.yml
-      #- type: bind
-      #  source: ./azure.yaml
-      #  target: /app/azure.yaml
-      #- type: bind
-      #  source: ./ai_settings.yaml
-      #  target: /app/ai_settings.yaml
-  redis:
-    image: "redis/redis-stack-server:latest"
diff --git a/examples/chainlit/Dockerfile b/examples/chainlit/Dockerfile
deleted file mode 100644
index 5cf7a67d..00000000
--- a/examples/chainlit/Dockerfile
+++ /dev/null
@@ -1,25 +0,0 @@
-# Use an official Python runtime as a parent image
-FROM python:3.12-slim
-
-# Set the working directory in the container
-WORKDIR /app
-
-# Copy the current directory contents into the container at /app
-COPY requirements.txt /app
-
-# Install C++ compiler
-RUN apt-get update \
-    && DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
-
-# Install any needed packages specified in requirements.txt
-RUN pip install --no-cache-dir -r requirements.txt \
-    && DEBIAN_FRONTEND=noninteractive apt-get remove -y build-essential \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
-
-COPY . /app
-
-# Run main.py via chainlit when the container launches
-CMD ["chainlit", "run", "-h", "--host", "0.0.0.0", "main.py" ]
diff --git a/examples/chainlit/README.md b/examples/chainlit/README.md
deleted file mode 100644
index 9970b3b7..00000000
--- a/examples/chainlit/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# LocalAI Demonstration with Embeddings and Chainlit
-
-This demonstration shows you how to use embeddings with existing data in `LocalAI`, and how to integrate it with Chainlit for an interactive querying experience.
We are using the `llama_index` library to facilitate the embedding and querying processes, and `chainlit` to provide an interactive interface. The `Weaviate` client is used as the embedding source. - -## Prerequisites - -Before proceeding, make sure you have the following installed: -- Weaviate client -- LocalAI and its dependencies -- Chainlit and its dependencies - -## Getting Started - -1. Clone this repository: -2. Navigate to the project directory: -3. Run the example: `chainlit run main.py` - -# Highlight on `llama_index` and `chainlit` - -`llama_index` is the key library that facilitates the process of embedding and querying data in LocalAI. It provides a seamless interface to integrate various components, such as `WeaviateVectorStore`, `LocalAI`, `ServiceContext`, and more, for a smooth querying experience. - -`chainlit` is used to provide an interactive interface for users to query the data and see the results in real-time. It integrates with llama_index to handle the querying process and display the results to the user. - -In this example, `llama_index` is used to set up the `VectorStoreIndex` and `QueryEngine`, and `chainlit` is used to handle the user interactions with `LocalAI` and display the results. - diff --git a/examples/chainlit/config.yaml b/examples/chainlit/config.yaml deleted file mode 100644 index 1590f7b8..00000000 --- a/examples/chainlit/config.yaml +++ /dev/null @@ -1,16 +0,0 @@ -localAI: - temperature: 0 - modelName: gpt-3.5-turbo - apiBase: http://local-ai.default - apiKey: stub - streaming: True -weviate: - url: http://weviate.local - index: AIChroma -query: - mode: hybrid - topK: 1 - alpha: 0.0 - chunkSize: 1024 -embedding: - model: BAAI/bge-small-en-v1.5 \ No newline at end of file diff --git a/examples/chainlit/main.py b/examples/chainlit/main.py deleted file mode 100644 index b57c7228..00000000 --- a/examples/chainlit/main.py +++ /dev/null @@ -1,82 +0,0 @@ -import os - -import weaviate -from llama_index.storage.storage_context import StorageContext -from llama_index.vector_stores import WeaviateVectorStore - -from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine -from llama_index.callbacks.base import CallbackManager -from llama_index import ( - LLMPredictor, - ServiceContext, - StorageContext, - VectorStoreIndex, -) -import chainlit as cl - -from llama_index.llms import LocalAI -from llama_index.embeddings import HuggingFaceEmbedding -import yaml - -# Load the configuration file -with open("config.yaml", "r") as ymlfile: - cfg = yaml.safe_load(ymlfile) - -# Get the values from the configuration file or set the default values -temperature = cfg['localAI'].get('temperature', 0) -model_name = cfg['localAI'].get('modelName', "gpt-3.5-turbo") -api_base = cfg['localAI'].get('apiBase', "http://local-ai.default") -api_key = cfg['localAI'].get('apiKey', "stub") -streaming = cfg['localAI'].get('streaming', True) -weaviate_url = cfg['weviate'].get('url', "http://weviate.default") -index_name = cfg['weviate'].get('index', "AIChroma") -query_mode = cfg['query'].get('mode', "hybrid") -topK = cfg['query'].get('topK', 1) -alpha = cfg['query'].get('alpha', 0.0) -embed_model_name = cfg['embedding'].get('model', "BAAI/bge-small-en-v1.5") -chunk_size = cfg['query'].get('chunkSize', 1024) - - -embed_model = HuggingFaceEmbedding(model_name=embed_model_name) - - -llm = LocalAI(temperature=temperature, model_name=model_name, api_base=api_base, api_key=api_key, streaming=streaming) -llm.globally_use_chat_completions = True; -client = 
weaviate.Client(weaviate_url) -vector_store = WeaviateVectorStore(weaviate_client=client, index_name=index_name) -storage_context = StorageContext.from_defaults(vector_store=vector_store) - -@cl.on_chat_start -async def factory(): - - llm_predictor = LLMPredictor( - llm=llm - ) - - service_context = ServiceContext.from_defaults(embed_model=embed_model, callback_manager=CallbackManager([cl.LlamaIndexCallbackHandler()]), llm_predictor=llm_predictor, chunk_size=chunk_size) - - index = VectorStoreIndex.from_vector_store( - vector_store, - storage_context=storage_context, - service_context=service_context - ) - - query_engine = index.as_query_engine(vector_store_query_mode=query_mode, similarity_top_k=topK, alpha=alpha, streaming=True) - - cl.user_session.set("query_engine", query_engine) - - -@cl.on_message -async def main(message: cl.Message): - query_engine = cl.user_session.get("query_engine") - response = await cl.make_async(query_engine.query)(message.content) - - response_message = cl.Message(content="") - - for token in response.response_gen: - await response_message.stream_token(token=token) - - if response.response_txt: - response_message.content = response.response_txt - - await response_message.send() diff --git a/examples/chainlit/requirements.txt b/examples/chainlit/requirements.txt deleted file mode 100644 index adcf2fc8..00000000 --- a/examples/chainlit/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -llama_index==0.11.20 -requests==2.32.3 -weaviate_client==4.9.0 -transformers -torch -chainlit diff --git a/examples/chatbot-ui-manual/README.md b/examples/chatbot-ui-manual/README.md deleted file mode 100644 index 3238f326..00000000 --- a/examples/chatbot-ui-manual/README.md +++ /dev/null @@ -1,50 +0,0 @@ -# chatbot-ui - -Example of integration with [mckaywrigley/chatbot-ui](https://github.com/mckaywrigley/chatbot-ui). - -![Screenshot from 2023-04-26 23-59-55](https://user-images.githubusercontent.com/2420543/234715439-98d12e03-d3ce-4f94-ab54-2b256808e05e.png) - -## Setup - -```bash -# Clone LocalAI -git clone https://github.com/go-skynet/LocalAI - -cd LocalAI/examples/chatbot-ui - -# (optional) Checkout a specific LocalAI tag -# git checkout -b build - -# Download gpt4all-j to models/ -wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j - -# start with docker-compose -docker-compose up -d --pull always -# or you can build the images with: -# docker-compose up -d --build -``` - -Then browse to `http://localhost:3000` to view the Web UI. - -## Pointing chatbot-ui to a separately managed LocalAI service - -If you want to use the [chatbot-ui example](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui) with an externally managed LocalAI service, you can alter the `docker-compose.yaml` file so that it looks like the below. You will notice the file is smaller, because we have removed the section that would normally start the LocalAI service. Take care to update the IP address (or FQDN) that the chatbot-ui service tries to access (marked `<>` below): - -```yaml -version: '3.6' - -services: - chatgpt: - image: ghcr.io/mckaywrigley/chatbot-ui:main - ports: - - 3000:3000 - environment: - - 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX' - - 'OPENAI_API_HOST=http://<>:8080' -``` - -Once you've edited the `docker-compose.yaml`, you can start it with `docker compose up`, then browse to `http://localhost:3000` to view the Web UI. - -## Accessing chatbot-ui - -Open http://localhost:3000 for the Web UI. 
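Before pointing chatbot-ui at a separately managed LocalAI instance as described above, it can help to smoke-test the endpoint with plain curl. A minimal sketch, assuming LocalAI listens on localhost:8080 and a `gpt-3.5-turbo` model is configured, as in the examples in this repository:

```bash
# List the models LocalAI currently serves (gpt-3.5-turbo should appear)
curl http://localhost:8080/v1/models

# Send the same kind of chat completion request that chatbot-ui will issue
curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
  "model": "gpt-3.5-turbo",
  "messages": [{"role": "user", "content": "Say hello"}],
  "temperature": 0.7
}'
```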
diff --git a/examples/chatbot-ui-manual/docker-compose.yaml b/examples/chatbot-ui-manual/docker-compose.yaml deleted file mode 100644 index c7782c34..00000000 --- a/examples/chatbot-ui-manual/docker-compose.yaml +++ /dev/null @@ -1,24 +0,0 @@ -version: '3.6' - -services: - api: - image: quay.io/go-skynet/local-ai:latest - build: - context: ../../ - dockerfile: Dockerfile - ports: - - 8080:8080 - environment: - - DEBUG=true - - MODELS_PATH=/models - volumes: - - ./models:/models:cached - command: ["/usr/bin/local-ai" ] - - chatgpt: - image: ghcr.io/mckaywrigley/chatbot-ui:main - ports: - - 3000:3000 - environment: - - 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX' - - 'OPENAI_API_HOST=http://api:8080' \ No newline at end of file diff --git a/examples/chatbot-ui-manual/models b/examples/chatbot-ui-manual/models deleted file mode 120000 index 1e266b1b..00000000 --- a/examples/chatbot-ui-manual/models +++ /dev/null @@ -1 +0,0 @@ -../models \ No newline at end of file diff --git a/examples/chatbot-ui/README.md b/examples/chatbot-ui/README.md deleted file mode 100644 index 3817aa85..00000000 --- a/examples/chatbot-ui/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# chatbot-ui - -Example of integration with [mckaywrigley/chatbot-ui](https://github.com/mckaywrigley/chatbot-ui). - -![Screenshot from 2023-04-26 23-59-55](https://user-images.githubusercontent.com/2420543/234715439-98d12e03-d3ce-4f94-ab54-2b256808e05e.png) - -## Run - -In this example LocalAI will download the gpt4all model and set it up as "gpt-3.5-turbo". See the `docker-compose.yaml` -```bash -# Clone LocalAI -git clone https://github.com/go-skynet/LocalAI - -cd LocalAI/examples/chatbot-ui - -# start with docker-compose -docker-compose up --pull always - -# or you can build the images with: -# docker-compose up -d --build -``` - -Then browse to `http://localhost:3000` to view the Web UI. - -## Pointing chatbot-ui to a separately managed LocalAI service - -If you want to use the [chatbot-ui example](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui) with an externally managed LocalAI service, you can alter the `docker-compose.yaml` file so that it looks like the below. You will notice the file is smaller, because we have removed the section that would normally start the LocalAI service. Take care to update the IP address (or FQDN) that the chatbot-ui service tries to access (marked `<>` below): - -```yaml -version: '3.6' - -services: - chatgpt: - image: ghcr.io/mckaywrigley/chatbot-ui:main - ports: - - 3000:3000 - environment: - - 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX' - - 'OPENAI_API_HOST=http://<>:8080' -``` - -Once you've edited the `docker-compose.yaml`, you can start it with `docker compose up`, then browse to `http://localhost:3000` to view the Web UI. - -## Accessing chatbot-ui - -Open http://localhost:3000 for the Web UI. diff --git a/examples/chatbot-ui/docker-compose.yaml b/examples/chatbot-ui/docker-compose.yaml deleted file mode 100644 index 27b4f4e0..00000000 --- a/examples/chatbot-ui/docker-compose.yaml +++ /dev/null @@ -1,37 +0,0 @@ -version: '3.6' - -services: - api: - image: quay.io/go-skynet/local-ai:latest - # As initially LocalAI will download the models defined in PRELOAD_MODELS - # you might need to tweak the healthcheck values here according to your network connection. - # Here we give a timespan of 20m to download all the required files. 
-    healthcheck:
-      test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
-      interval: 1m
-      timeout: 20m
-      retries: 20
-    build:
-      context: ../../
-      dockerfile: Dockerfile
-    ports:
-      - 8080:8080
-    environment:
-      - DEBUG=true
-      - MODELS_PATH=/models
-      # You can preload different models here as well.
-      # See: https://github.com/go-skynet/model-gallery
-      - 'PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}]'
-    volumes:
-      - ./models:/models:cached
-    command: ["/usr/bin/local-ai" ]
-  chatgpt:
-    depends_on:
-      api:
-        condition: service_healthy
-    image: ghcr.io/mckaywrigley/chatbot-ui:main
-    ports:
-      - 3000:3000
-    environment:
-      - 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX'
-      - 'OPENAI_API_HOST=http://api:8080'
\ No newline at end of file
diff --git a/examples/configurations/README.md b/examples/configurations/README.md
deleted file mode 100644
index 8b6a6560..00000000
--- a/examples/configurations/README.md
+++ /dev/null
@@ -1,95 +0,0 @@
-## Advanced configuration
-
-This section contains examples on how to install models manually with config files.
-
-### Prerequisites
-
-First clone LocalAI:
-
-```bash
-git clone https://github.com/go-skynet/LocalAI
-
-cd LocalAI
-```
-
-Set up the model you prefer from the examples below and then start LocalAI:
-
-```bash
-docker compose up -d --pull always
-```
-
-If LocalAI is already started, you can restart it with
-
-```bash
-docker compose restart
-```
-
-See also the getting started: https://localai.io/basics/getting_started/
-
-You can also start LocalAI just with docker:
-
-```
-docker run -p 8080:8080 -v $PWD/models:/models -ti --rm quay.io/go-skynet/local-ai:master --models-path /models --threads 4
-```
-
-### Mistral
-
-To set up Mistral, copy the files inside `mistral` into the `models` folder:
-
-```bash
-cp -r examples/configurations/mistral/* models/
-```
-
-Now download the model:
-
-```bash
-wget https://huggingface.co/TheBloke/Mistral-7B-OpenOrca-GGUF/resolve/main/mistral-7b-openorca.Q6_K.gguf -O models/mistral-7b-openorca.Q6_K.gguf
-```
-
-### LLaVA
-
-![llava](https://github.com/mudler/LocalAI/assets/2420543/cb0a0897-3b58-4350-af66-e6f4387b58d3)
-
-#### Setup
-
-```
-cp -r examples/configurations/llava/* models/
-wget https://huggingface.co/mys/ggml_bakllava-1/resolve/main/ggml-model-q4_k.gguf -O models/ggml-model-q4_k.gguf
-wget https://huggingface.co/mys/ggml_bakllava-1/resolve/main/mmproj-model-f16.gguf -O models/mmproj-model-f16.gguf
-```
-
-#### Try it out
-
-```
-curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{ "model": "llava", "messages": [{"role": "user", "content": [{"type":"text", "text": "What is in the image?"}, {"type": "image_url", "image_url": {"url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" }}]}], "temperature": 0.9}'
-
-```
-
-### Phi-2
-
-```
-cp -r examples/configurations/phi-2.yaml models/
-
-curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
-    "model": "phi-2",
-    "messages": [{"role": "user", "content": "How are you doing?"}],
-    "temperature": 0.1
-}'
-```
-
-### Mixtral
-
-```
-cp -r examples/configurations/mixtral/* models/
-wget https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF/resolve/main/mixtral-8x7b-instruct-v0.1.Q2_K.gguf -O models/mixtral-8x7b-instruct-v0.1.Q2_K.gguf
-```
-
-#### Test it out
-
-```
-curl http://localhost:8080/v1/completions -H "Content-Type: 
application/json" -d '{ - "model": "mixtral", - "prompt": "How fast is light?", - "temperature": 0.1 }' -``` diff --git a/examples/configurations/llava/chat-simple.tmpl b/examples/configurations/llava/chat-simple.tmpl deleted file mode 100644 index 5fe36767..00000000 --- a/examples/configurations/llava/chat-simple.tmpl +++ /dev/null @@ -1,3 +0,0 @@ -A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. -{{.Input}} -ASSISTANT: \ No newline at end of file diff --git a/examples/configurations/llava/llava.yaml b/examples/configurations/llava/llava.yaml deleted file mode 100644 index db2eaad0..00000000 --- a/examples/configurations/llava/llava.yaml +++ /dev/null @@ -1,19 +0,0 @@ -backend: llama-cpp -context_size: 4096 -f16: true -threads: 11 -gpu_layers: 90 -mmap: true -name: llava -roles: - user: "USER:" - assistant: "ASSISTANT:" - system: "SYSTEM:" -parameters: - model: ggml-model-q4_k.gguf - temperature: 0.2 - top_k: 40 - top_p: 0.95 -template: - chat: chat-simple -mmproj: mmproj-model-f16.gguf diff --git a/examples/configurations/mistral/chatml-block.tmpl b/examples/configurations/mistral/chatml-block.tmpl deleted file mode 100644 index cc86392a..00000000 --- a/examples/configurations/mistral/chatml-block.tmpl +++ /dev/null @@ -1,3 +0,0 @@ -{{.Input}} -<|im_start|>assistant - diff --git a/examples/configurations/mistral/chatml.tmpl b/examples/configurations/mistral/chatml.tmpl deleted file mode 100644 index 09e25322..00000000 --- a/examples/configurations/mistral/chatml.tmpl +++ /dev/null @@ -1,3 +0,0 @@ -<|im_start|>{{if eq .RoleName "assistant"}}assistant{{else if eq .RoleName "system"}}system{{else if eq .RoleName "user"}}user{{end}} -{{if .Content}}{{.Content}}{{end}} -<|im_end|> diff --git a/examples/configurations/mistral/completion.tmpl b/examples/configurations/mistral/completion.tmpl deleted file mode 100644 index 9867cfcd..00000000 --- a/examples/configurations/mistral/completion.tmpl +++ /dev/null @@ -1 +0,0 @@ -{{.Input}} \ No newline at end of file diff --git a/examples/configurations/mistral/mistral.yaml b/examples/configurations/mistral/mistral.yaml deleted file mode 100644 index d2927f06..00000000 --- a/examples/configurations/mistral/mistral.yaml +++ /dev/null @@ -1,16 +0,0 @@ -name: mistral -mmap: true -parameters: - model: mistral-7b-openorca.Q6_K.gguf - temperature: 0.2 - top_k: 40 - top_p: 0.95 -template: - chat_message: chatml - chat: chatml-block - completion: completion -context_size: 4096 -f16: true -stopwords: -- <|im_end|> -threads: 4 diff --git a/examples/configurations/mixtral/mixtral b/examples/configurations/mixtral/mixtral deleted file mode 100644 index 88ce5c01..00000000 --- a/examples/configurations/mixtral/mixtral +++ /dev/null @@ -1 +0,0 @@ -[INST] {{.Input}} [/INST] diff --git a/examples/configurations/mixtral/mixtral-chat b/examples/configurations/mixtral/mixtral-chat deleted file mode 100644 index 88ce5c01..00000000 --- a/examples/configurations/mixtral/mixtral-chat +++ /dev/null @@ -1 +0,0 @@ -[INST] {{.Input}} [/INST] diff --git a/examples/configurations/mixtral/mixtral.yaml b/examples/configurations/mixtral/mixtral.yaml deleted file mode 100755 index 9a2d7eed..00000000 --- a/examples/configurations/mixtral/mixtral.yaml +++ /dev/null @@ -1,16 +0,0 @@ -context_size: 512 -f16: true -threads: 11 -gpu_layers: 90 -name: mixtral -mmap: true -parameters: - model: mixtral-8x7b-instruct-v0.1.Q2_K.gguf - temperature: 0.2 - top_k: 40 - top_p: 0.95 - batch: 512 
- tfz: 1.0 -template: - chat: mixtral-chat - completion: mixtral diff --git a/examples/configurations/phi-2.yaml b/examples/configurations/phi-2.yaml deleted file mode 100644 index e5a13442..00000000 --- a/examples/configurations/phi-2.yaml +++ /dev/null @@ -1,29 +0,0 @@ -name: phi-2 -context_size: 2048 -f16: true -gpu_layers: 90 -mmap: true -trimsuffix: -- "\n" -parameters: - model: huggingface://TheBloke/phi-2-GGUF/phi-2.Q8_0.gguf - temperature: 0.2 - top_k: 40 - top_p: 0.95 - seed: -1 - -mirostat: 2 -mirostat_eta: 1.0 -mirostat_tau: 1.0 -template: - chat: &template |- - Instruct: {{.Input}} - Output: - completion: *template - -usage: | - To use this model, interact with the API (in another terminal) with curl for instance: - curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{ - "model": "phi-2", - "messages": [{"role": "user", "content": "How are you doing?", "temperature": 0.1}] - }' diff --git a/examples/continue/README.md b/examples/continue/README.md deleted file mode 100644 index aa63530a..00000000 --- a/examples/continue/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# Continue - -![logo](https://continue.dev/docs/assets/images/continue-cover-logo-aa135cc83fe8a14af480d1633ed74eb5.png) - -This document presents an example of integration with [continuedev/continue](https://github.com/continuedev/continue). - -![Screenshot](https://continue.dev/docs/assets/images/continue-screenshot-1f36b99467817f755739d7f4c4c08fe3.png) - -For a live demonstration, please click on the link below: - -- [How it works (Video demonstration)](https://www.youtube.com/watch?v=3Ocrc-WX4iQ) - -## Integration Setup Walkthrough - -1. [As outlined in `continue`'s documentation](https://continue.dev/docs/getting-started), install the [Visual Studio Code extension from the marketplace](https://marketplace.visualstudio.com/items?itemName=Continue.continue) and open it. -2. In this example, LocalAI will download the gpt4all model and set it up as "gpt-3.5-turbo". Refer to the `docker-compose.yaml` file for details. - - ```bash - # Clone LocalAI - git clone https://github.com/go-skynet/LocalAI - - cd LocalAI/examples/continue - - # Start with docker-compose - docker-compose up --build -d - ``` - -3. Type `/config` within Continue's VSCode extension, or edit the file located at `~/.continue/config.py` on your system with the following configuration: - - ```py - from continuedev.src.continuedev.libs.llm.openai import OpenAI - - config = ContinueConfig( - ... - models=Models( - default=OpenAI( - api_key="my-api-key", - model="gpt-3.5-turbo", - api_base="http://localhost:8080", - ) - ), - ) - ``` - -This setup enables you to make queries directly to your model running in the Docker container. Note that the `api_key` does not need to be properly set up; it is included here as a placeholder. - -If editing the configuration seems confusing, you may copy and paste the provided default `config.py` file over the existing one in `~/.continue/config.py` after initializing the extension in the VSCode IDE. 
- -## Additional Resources - -- [Official Continue documentation](https://continue.dev/docs/intro) -- [Documentation page on using self-hosted models](https://continue.dev/docs/customization#self-hosting-an-open-source-model) -- [Official extension link](https://marketplace.visualstudio.com/items?itemName=Continue.continue) diff --git a/examples/continue/config.py b/examples/continue/config.py deleted file mode 100644 index cbc1c218..00000000 --- a/examples/continue/config.py +++ /dev/null @@ -1,148 +0,0 @@ -""" -This is the Continue configuration file. - -See https://continue.dev/docs/customization to learn more. -""" - -import subprocess - -from continuedev.src.continuedev.core.main import Step -from continuedev.src.continuedev.core.sdk import ContinueSDK -from continuedev.src.continuedev.core.models import Models -from continuedev.src.continuedev.core.config import CustomCommand, SlashCommand, ContinueConfig -from continuedev.src.continuedev.plugins.context_providers.github import GitHubIssuesContextProvider -from continuedev.src.continuedev.plugins.context_providers.google import GoogleContextProvider -from continuedev.src.continuedev.plugins.policies.default import DefaultPolicy -from continuedev.src.continuedev.libs.llm.openai import OpenAI, OpenAIServerInfo -from continuedev.src.continuedev.libs.llm.ggml import GGML - -from continuedev.src.continuedev.plugins.steps.open_config import OpenConfigStep -from continuedev.src.continuedev.plugins.steps.clear_history import ClearHistoryStep -from continuedev.src.continuedev.plugins.steps.feedback import FeedbackStep -from continuedev.src.continuedev.plugins.steps.comment_code import CommentCodeStep -from continuedev.src.continuedev.plugins.steps.share_session import ShareSessionStep -from continuedev.src.continuedev.plugins.steps.main import EditHighlightedCodeStep -from continuedev.src.continuedev.plugins.context_providers.search import SearchContextProvider -from continuedev.src.continuedev.plugins.context_providers.diff import DiffContextProvider -from continuedev.src.continuedev.plugins.context_providers.url import URLContextProvider - -class CommitMessageStep(Step): - """ - This is a Step, the building block of Continue. - It can be used below as a slash command, so that - run will be called when you type '/commit'. - """ - async def run(self, sdk: ContinueSDK): - - # Get the root directory of the workspace - dir = sdk.ide.workspace_directory - - # Run git diff in that directory - diff = subprocess.check_output( - ["git", "diff"], cwd=dir).decode("utf-8") - - # Ask the LLM to write a commit message, - # and set it as the description of this step - self.description = await sdk.models.default.complete( - f"{diff}\n\nWrite a short, specific (less than 50 chars) commit message about the above changes:") - - -config = ContinueConfig( - - # If set to False, we will not collect any usage data - # See here to learn what anonymous data we collect: https://continue.dev/docs/telemetry - allow_anonymous_telemetry=True, - - models = Models( - default = OpenAI( - api_key = "my-api-key", - model = "gpt-3.5-turbo", - openai_server_info = OpenAIServerInfo( - api_base = "http://localhost:8080", - model = "gpt-3.5-turbo" - ) - ) - ), - # Set a system message with information that the LLM should always keep in mind - # E.g. "Please give concise answers. Always respond in Spanish." - system_message=None, - - # Set temperature to any value between 0 and 1. Higher values will make the LLM - # more creative, while lower values will make it more predictable. 
-    temperature=0.5,
-
-    # Custom commands let you map a prompt to a shortened slash command
-    # They are like slash commands, but more easily defined - write just a prompt instead of a Step class
-    # Their output will always be in chat form
-    custom_commands=[
-        # CustomCommand(
-        #     name="test",
-        #     description="Write unit tests for the highlighted code",
-        #     prompt="Write a comprehensive set of unit tests for the selected code. It should set up, run tests that check for correctness including important edge cases, and tear down. Ensure that the tests are complete and sophisticated. Give the tests just as chat output, don't edit any file.",
-        # )
-    ],
-
-    # Slash commands let you run a Step from a slash command
-    slash_commands=[
-        # SlashCommand(
-        #     name="commit",
-        #     description="This is an example slash command. Use /config to edit it and create more",
-        #     step=CommitMessageStep,
-        # )
-        SlashCommand(
-            name="edit",
-            description="Edit code in the current file or the highlighted code",
-            step=EditHighlightedCodeStep,
-        ),
-        SlashCommand(
-            name="config",
-            description="Customize Continue - slash commands, LLMs, system message, etc.",
-            step=OpenConfigStep,
-        ),
-        SlashCommand(
-            name="comment",
-            description="Write comments for the current file or highlighted code",
-            step=CommentCodeStep,
-        ),
-        SlashCommand(
-            name="feedback",
-            description="Send feedback to improve Continue",
-            step=FeedbackStep,
-        ),
-        SlashCommand(
-            name="clear",
-            description="Clear step history",
-            step=ClearHistoryStep,
-        ),
-        SlashCommand(
-            name="share",
-            description="Download and share the session transcript",
-            step=ShareSessionStep,
-        )
-    ],
-
-    # Context providers let you quickly select context by typing '@'
-    # Uncomment the following to
-    # - quickly reference GitHub issues
-    # - show Google search results to the LLM
-    context_providers=[
-        # GitHubIssuesContextProvider(
-        #     repo_name="/",
-        #     auth_token=""
-        # ),
-        # GoogleContextProvider(
-        #     serper_api_key=""
-        # )
-        SearchContextProvider(),
-        DiffContextProvider(),
-        URLContextProvider(
-            preset_urls = [
-                # Add any common urls you reference here so they appear in autocomplete
-            ]
-        )
-    ],
-
-    # Policies hold the main logic that decides which Step to take next
-    # You can use them to design agents, or deeply customize Continue
-    policy=DefaultPolicy()
-)
diff --git a/examples/continue/docker-compose.yml b/examples/continue/docker-compose.yml
deleted file mode 100644
index bc063194..00000000
--- a/examples/continue/docker-compose.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-version: '3.6'
-
-services:
-  api:
-    image: quay.io/go-skynet/local-ai:latest
-    # As initially LocalAI will download the models defined in PRELOAD_MODELS
-    # you might need to tweak the healthcheck values here according to your network connection.
-    # Here we give a timespan of 20m to download all the required files.
-    healthcheck:
-      test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
-      interval: 1m
-      timeout: 20m
-      retries: 20
-    build:
-      context: ../../
-      dockerfile: Dockerfile
-    ports:
-      - 8080:8080
-    environment:
-      - DEBUG=true
-      - MODELS_PATH=/models
-      # You can preload different models here as well.
- # See: https://github.com/go-skynet/model-gallery - - 'PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}]' - volumes: - - ./models:/models:cached - command: ["/usr/bin/local-ai" ] diff --git a/examples/continue/img/screen.png b/examples/continue/img/screen.png deleted file mode 100755 index a03a80f6..00000000 Binary files a/examples/continue/img/screen.png and /dev/null differ diff --git a/examples/discord-bot/.env.example b/examples/discord-bot/.env.example deleted file mode 100644 index 332a71b7..00000000 --- a/examples/discord-bot/.env.example +++ /dev/null @@ -1,9 +0,0 @@ -# CPU .env docs: https://localai.io/howtos/easy-setup-docker-cpu/ -# GPU .env docs: https://localai.io/howtos/easy-setup-docker-gpu/ - -OPENAI_API_KEY=x -DISCORD_BOT_TOKEN=x -DISCORD_CLIENT_ID=x -OPENAI_API_BASE=http://api:8080 -ALLOWED_SERVER_IDS=x -SERVER_TO_MODERATION_CHANNEL=1:1 diff --git a/examples/discord-bot/README.md b/examples/discord-bot/README.md deleted file mode 100644 index 4aa9cdb7..00000000 --- a/examples/discord-bot/README.md +++ /dev/null @@ -1,76 +0,0 @@ -# discord-bot - -![Screenshot from 2023-05-01 07-58-19](https://user-images.githubusercontent.com/2420543/235413924-0cb2e75b-f2d6-4119-8610-44386e44afb8.png) - -## Setup - -```bash -# Clone LocalAI -git clone https://github.com/go-skynet/LocalAI - -cd LocalAI/examples/discord-bot - -# (optional) Checkout a specific LocalAI tag -# git checkout -b build - -# Download gpt4all-j to models/ -wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j - -# Set the discord bot options (see: https://github.com/go-skynet/gpt-discord-bot#setup) -cp -rfv .env.example .env -vim .env - -# start with docker-compose -docker-compose up -d --build -``` - -Note: see setup options here: https://github.com/go-skynet/gpt-discord-bot#setup - -Open up the URL in the console and give permission to the bot in your server. 
Start a thread with `/chat ..`
-
-## Kubernetes
-
-- Install the local-ai chart first
-- Change OPENAI_API_BASE to point to the API address and apply the discord-bot manifest:
-
-```yaml
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: discord-bot
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: localai
-  namespace: discord-bot
-  labels:
-    app: localai
-spec:
-  selector:
-    matchLabels:
-      app: localai
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: localai
-      name: localai
-    spec:
-      containers:
-        - name: localai-discord
-          env:
-          - name: OPENAI_API_KEY
-            value: "x"
-          - name: DISCORD_BOT_TOKEN
-            value: ""
-          - name: DISCORD_CLIENT_ID
-            value: ""
-          - name: OPENAI_API_BASE
-            value: "http://local-ai.default.svc.cluster.local:8080"
-          - name: ALLOWED_SERVER_IDS
-            value: "xx"
-          - name: SERVER_TO_MODERATION_CHANNEL
-            value: "1:1"
-          image: quay.io/go-skynet/gpt-discord-bot:main
-```
diff --git a/examples/discord-bot/docker-compose.yaml b/examples/discord-bot/docker-compose.yaml
deleted file mode 100644
index e7ee6b4c..00000000
--- a/examples/discord-bot/docker-compose.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-version: '3.6'
-
-services:
-  api:
-    image: quay.io/go-skynet/local-ai:latest
-    build:
-      context: ../../
-      dockerfile: Dockerfile
-    ports:
-      - 8080:8080
-    environment:
-      - DEBUG=true
-      - MODELS_PATH=/models
-    volumes:
-      - ./models:/models:cached
-    command: ["/usr/bin/local-ai" ]
-
-  bot:
-    image: quay.io/go-skynet/gpt-discord-bot:main
-    env_file:
-      - .env
diff --git a/examples/discord-bot/models b/examples/discord-bot/models
deleted file mode 120000
index 1e266b1b..00000000
--- a/examples/discord-bot/models
+++ /dev/null
@@ -1 +0,0 @@
-../models
\ No newline at end of file
diff --git a/examples/e2e-fine-tuning/README.md b/examples/e2e-fine-tuning/README.md
deleted file mode 100644
index d95d8914..00000000
--- a/examples/e2e-fine-tuning/README.md
+++ /dev/null
@@ -1,83 +0,0 @@
-This is an example of fine-tuning an LLM to use with [LocalAI](https://github.com/mudler/LocalAI) written by [@mudler](https://github.com/mudler).
-
-Specifically, this example shows how to use [axolotl](https://github.com/OpenAccess-AI-Collective/axolotl) to fine-tune an LLM to consume with LocalAI as a `gguf` model.
-
-A notebook is provided that currently works on _very small_ datasets on Google Colab on the free instance. It is far from producing good models, but it gives a sense of how to use the code with a better dataset and configurations, and how to use the model produced with LocalAI. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mudler/LocalAI/blob/master/examples/e2e-fine-tuning/notebook.ipynb)
-
-## Requirements
-
-For this example you will need at least 12GB of GPU VRAM and a Linux box.
-The notebook is tested on Google Colab with a Tesla T4 GPU.
-
-## Clone this directory
-
-Clone the repository and enter the example directory:
-
-```bash
-git clone http://github.com/mudler/LocalAI
-cd LocalAI/examples/e2e-fine-tuning
-```
-
-## Install dependencies
-
-```bash
-# Install axolotl and dependencies
-git clone https://github.com/OpenAccess-AI-Collective/axolotl && pushd axolotl && git checkout 797f3dd1de8fd8c0eafbd1c9fdb172abd9ff840a && popd #0.3.0
-pip install packaging
-pushd axolotl && pip install -e '.[flash-attn,deepspeed]' && popd
-
-# https://github.com/oobabooga/text-generation-webui/issues/4238
-pip install https://github.com/Dao-AILab/flash-attention/releases/download/v2.3.0/flash_attn-2.3.0+cu117torch2.0cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
-```
-
-Configure accelerate:
-
-```bash
-accelerate config default
-```
-
-## Fine-tuning
-
-We will need to configure axolotl. This example provides an `axolotl.yaml` file that uses openllama-3b for fine-tuning. Copy the `axolotl.yaml` file and edit it to your needs. The dataset needs to be next to it as `dataset.json`. The format used is `completion`, which is a list of JSON objects with a `text` field holding the full text to train the LLM with.
-
-If you have a big dataset, you can pre-tokenize it to speed up the fine-tuning process:
-
-```bash
-# Optional pre-tokenize (run only if big dataset)
-python -m axolotl.cli.preprocess axolotl.yaml
-```
-
-Now we are ready to start the fine-tuning process:
-```bash
-# Fine-tune
-accelerate launch -m axolotl.cli.train axolotl.yaml
-```
-
-After we have finished the fine-tuning, we merge the LoRA adapter into the base model:
-```bash
-# Merge lora
-python3 -m axolotl.cli.merge_lora axolotl.yaml --lora_model_dir="./qlora-out" --load_in_8bit=False --load_in_4bit=False
-```
-
-And we convert it to the gguf format that LocalAI can consume:
-
-```bash
-
-# Convert to gguf
-git clone https://github.com/ggerganov/llama.cpp.git
-pushd llama.cpp && make GGML_CUDA=1 && popd
-
-# We need to convert the pytorch model into ggml for quantization
-# It creates 'ggml-model-f16.gguf' in the 'merged' directory.
-pushd llama.cpp && python convert.py --outtype f16 \
-    ../qlora-out/merged/pytorch_model-00001-of-00002.bin && popd
-
-# Start off by making a basic q4_0 4-bit quantization.
-# It's important to have 'ggml' in the name of the quant for some
-# software to recognize its file format.
-pushd llama.cpp && ./quantize ../qlora-out/merged/ggml-model-f16.gguf \
-    ../custom-model-q4_0.bin q4_0
-
-```
-
-Now you should have ended up with a `custom-model-q4_0.bin` file that you can copy into the LocalAI models directory and use with LocalAI.
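As a minimal sketch of that last step (the endpoint, port, and curl style follow the other examples in this repository; serving a raw quantized file directly under its file name is an assumption about the LocalAI version in use — alternatively, add a YAML config for it like the ones under `examples/configurations`):

```bash
# Make the fine-tuned, quantized model visible to LocalAI
cp custom-model-q4_0.bin models/

# Query it through the OpenAI-compatible completions endpoint;
# the file name doubles as the model name when no YAML config wraps it
curl http://localhost:8080/v1/completions -H "Content-Type: application/json" -d '{
  "model": "custom-model-q4_0.bin",
  "prompt": "How fast is light?",
  "temperature": 0.2
}'
```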
diff --git a/examples/e2e-fine-tuning/axolotl.yaml b/examples/e2e-fine-tuning/axolotl.yaml deleted file mode 100644 index ea956dd4..00000000 --- a/examples/e2e-fine-tuning/axolotl.yaml +++ /dev/null @@ -1,63 +0,0 @@ - -base_model: openlm-research/open_llama_3b_v2 -model_type: LlamaForCausalLM -tokenizer_type: LlamaTokenizer -load_in_8bit: false -load_in_4bit: true -strict: false -push_dataset_to_hub: false -datasets: -- path: dataset.json - ds_type: json - type: completion -dataset_prepared_path: -val_set_size: 0.05 -adapter: qlora -lora_model_dir: -sequence_len: 1024 -sample_packing: true -lora_r: 8 -lora_alpha: 32 -lora_dropout: 0.05 -lora_target_modules: -lora_target_linear: true -lora_fan_in_fan_out: -wandb_project: -wandb_entity: -wandb_watch: -wandb_run_id: -wandb_log_model: -output_dir: ./qlora-out -gradient_accumulation_steps: 1 -micro_batch_size: 2 -num_epochs: 4 -optimizer: paged_adamw_32bit -torchdistx_path: -lr_scheduler: cosine -learning_rate: 0.0002 -train_on_inputs: false -group_by_length: false -bf16: false -fp16: true -tf32: false -gradient_checkpointing: true -early_stopping_patience: -resume_from_checkpoint: -local_rank: -logging_steps: 1 -xformers_attention: -flash_attention: false -gptq_groupsize: -gptq_model_v1: -warmup_steps: 20 -eval_steps: 0.05 -save_steps: -debug: -deepspeed: -weight_decay: 0.1 -fsdp: -fsdp_config: -special_tokens: -bos_token: "" -eos_token: "" -unk_token: "" diff --git a/examples/e2e-fine-tuning/notebook.ipynb b/examples/e2e-fine-tuning/notebook.ipynb deleted file mode 100644 index e80dfce5..00000000 --- a/examples/e2e-fine-tuning/notebook.ipynb +++ /dev/null @@ -1,1655 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Finetuning a model and using it with LocalAI\n", - "\n", - "This is an example of fine-tuning a LLM model to use with [LocalAI](https://github.com/mudler/LocalAI) written by [@mudler](https://github.com/mudler).\n", - "\n", - "Specifically, this example shows how to use [axolotl](https://github.com/OpenAccess-AI-Collective/axolotl) to fine-tune a LLM model to consume with LocalAI as a `gguf` model." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "puRhZeHvuHgB" - }, - "source": [ - "# Important!\n", - "\n", - "Before starting, make sure you have selected GPU runtime : Runtime -> Change runtime type -> GPU (T4)!" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "xcUAOoASZUV1" - }, - "source": [ - "Change the model to link to your dataset. 
Upload the dataset as `output.jsonl` in the root tree and edit the model file (model.yml) with:\n", - "\n", - "```\n", - "# local\n", - "datasets:\n", - " - path: /content/output.jsonl\n", - " ds_type: json\n", - " type: completion\n", - "\n", - "```\n", - "\n", - "A full example:\n", - "\n", - "```yaml\n", - "\n", - "base_model: openlm-research/open_llama_3b_v2\n", - "model_type: LlamaForCausalLM\n", - "tokenizer_type: LlamaTokenizer\n", - "load_in_8bit: false\n", - "load_in_4bit: true\n", - "strict: false\n", - "push_dataset_to_hub: false\n", - "datasets:\n", - " - path: /content/output.jsonl\n", - " ds_type: json\n", - " type: completion\n", - "dataset_prepared_path:\n", - "val_set_size: 0.05\n", - "adapter: qlora\n", - "lora_model_dir:\n", - "sequence_len: 1024\n", - "sample_packing: true\n", - "lora_r: 8\n", - "lora_alpha: 32\n", - "lora_dropout: 0.05\n", - "lora_target_modules:\n", - "lora_target_linear: true\n", - "lora_fan_in_fan_out:\n", - "wandb_project:\n", - "wandb_entity:\n", - "wandb_watch:\n", - "wandb_run_id:\n", - "wandb_log_model:\n", - "output_dir: ./qlora-out\n", - "gradient_accumulation_steps: 1\n", - "micro_batch_size: 2\n", - "num_epochs: 4\n", - "optimizer: paged_adamw_32bit\n", - "torchdistx_path:\n", - "lr_scheduler: cosine\n", - "learning_rate: 0.0002\n", - "train_on_inputs: false\n", - "group_by_length: false\n", - "bf16: false\n", - "fp16: true\n", - "tf32: false\n", - "gradient_checkpointing: true\n", - "early_stopping_patience:\n", - "resume_from_checkpoint:\n", - "local_rank:\n", - "logging_steps: 1\n", - "xformers_attention:\n", - "flash_attention: false\n", - "gptq_groupsize:\n", - "gptq_model_v1:\n", - "warmup_steps: 20\n", - "eval_steps: 0.05\n", - "save_steps:\n", - "debug:\n", - "deepspeed:\n", - "weight_decay: 0.1\n", - "fsdp:\n", - "fsdp_config:\n", - "special_tokens:\n", - " bos_token: \"\"\n", - " eos_token: \"\"\n", - " unk_token: \"\"\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "CBVQikr2WiFP", - "outputId": "236f9f0e-b2b5-4ba9-9127-27f804c511db" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Cloning into 'axolotl'...\n", - "remote: Enumerating objects: 7525, done.\u001b[K\n", - "remote: Counting objects: 100% (1726/1726), done.\u001b[K\n", - "remote: Compressing objects: 100% (385/385), done.\u001b[K\n", - "remote: Total 7525 (delta 1525), reused 1409 (delta 1319), pack-reused 5799\u001b[K\n", - "Receiving objects: 100% (7525/7525), 2.64 MiB | 10.52 MiB/s, done.\n", - "Resolving deltas: 100% (4854/4854), done.\n", - "Note: switching to '797f3dd1de8fd8c0eafbd1c9fdb172abd9ff840a'.\n", - "\n", - "You are in 'detached HEAD' state. You can look around, make experimental\n", - "changes and commit them, and you can discard any commits you make in this\n", - "state without impacting any branches by switching back to a branch.\n", - "\n", - "If you want to create a new branch to retain commits you create, you may\n", - "do so (now or later) by using -c with the switch command. 
Example:\n", - "\n", - " git switch -c \n", - "\n", - "Or undo this operation with:\n", - "\n", - " git switch -\n", - "\n", - "Turn off this advice by setting config variable advice.detachedHead to false\n", - "\n", - "HEAD is now at 797f3dd don't train if eval split is too small (#873)\n", - "Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (23.2)\n", - "Obtaining file:///content/axolotl\n", - " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", - "Collecting auto-gptq==0.5.1 (from axolotl==0.3.0)\n", - " Downloading auto_gptq-0.5.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.8 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m4.8/4.8 MB\u001b[0m \u001b[31m14.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from axolotl==0.3.0) (23.2)\n", - "Collecting peft==0.6.0 (from axolotl==0.3.0)\n", - " Downloading peft-0.6.0-py3-none-any.whl (134 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m134.9/134.9 kB\u001b[0m \u001b[31m20.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting transformers==4.35.1 (from axolotl==0.3.0)\n", - " Downloading transformers-4.35.1-py3-none-any.whl (7.9 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m7.9/7.9 MB\u001b[0m \u001b[31m44.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting bitsandbytes>=0.41.1 (from axolotl==0.3.0)\n", - " Downloading bitsandbytes-0.41.2.post2-py3-none-any.whl (92.6 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m92.6/92.6 MB\u001b[0m \u001b[31m9.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting accelerate==0.24.1 (from axolotl==0.3.0)\n", - " Downloading accelerate-0.24.1-py3-none-any.whl (261 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m261.4/261.4 kB\u001b[0m \u001b[31m31.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting addict (from axolotl==0.3.0)\n", - " Downloading addict-2.4.0-py3-none-any.whl (3.8 kB)\n", - "Collecting fire (from axolotl==0.3.0)\n", - " Downloading fire-0.5.0.tar.gz (88 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m88.3/88.3 kB\u001b[0m \u001b[31m13.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Preparing metadata (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", - "Requirement already satisfied: PyYAML>=6.0 in /usr/local/lib/python3.10/dist-packages (from axolotl==0.3.0) (6.0.1)\n", - "Collecting datasets>=2.14.0 (from axolotl==0.3.0)\n", - " Downloading datasets-2.15.0-py3-none-any.whl (521 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m521.2/521.2 kB\u001b[0m \u001b[31m51.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting sentencepiece (from axolotl==0.3.0)\n", - " Downloading sentencepiece-0.1.99-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.3/1.3 MB\u001b[0m \u001b[31m76.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting wandb (from axolotl==0.3.0)\n", - " Downloading wandb-0.16.0-py3-none-any.whl (2.1 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.1/2.1 MB\u001b[0m \u001b[31m90.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting einops (from axolotl==0.3.0)\n", - " Downloading einops-0.7.0-py3-none-any.whl (44 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m44.6/44.6 kB\u001b[0m \u001b[31m6.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting xformers==0.0.22 (from axolotl==0.3.0)\n", - " Downloading xformers-0.0.22-cp310-cp310-manylinux2014_x86_64.whl (211.6 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m211.6/211.6 MB\u001b[0m \u001b[31m6.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting optimum==1.13.2 (from axolotl==0.3.0)\n", - " Downloading optimum-1.13.2.tar.gz (300 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m301.0/301.0 kB\u001b[0m \u001b[31m37.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n", - " Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n", - " Preparing metadata (pyproject.toml) ... 
\u001b[?25l\u001b[?25hdone\n", - "Collecting hf_transfer (from axolotl==0.3.0)\n", - " Downloading hf_transfer-0.1.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.9 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.9/3.9 MB\u001b[0m \u001b[31m48.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting colorama (from axolotl==0.3.0)\n", - " Downloading colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n", - "Requirement already satisfied: numba in /usr/local/lib/python3.10/dist-packages (from axolotl==0.3.0) (0.58.1)\n", - "Collecting numpy>=1.24.4 (from axolotl==0.3.0)\n", - " Downloading numpy-1.26.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (18.2 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m18.2/18.2 MB\u001b[0m \u001b[31m57.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting bert-score==0.3.13 (from axolotl==0.3.0)\n", - " Downloading bert_score-0.3.13-py3-none-any.whl (61 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m61.1/61.1 kB\u001b[0m \u001b[31m8.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting evaluate==0.4.0 (from axolotl==0.3.0)\n", - " Downloading evaluate-0.4.0-py3-none-any.whl (81 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m81.4/81.4 kB\u001b[0m \u001b[31m7.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting rouge-score==0.1.2 (from axolotl==0.3.0)\n", - " Downloading rouge_score-0.1.2.tar.gz (17 kB)\n", - " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", - "Requirement already satisfied: scipy in /usr/local/lib/python3.10/dist-packages (from axolotl==0.3.0) (1.11.3)\n", - "Requirement already satisfied: scikit-learn==1.2.2 in /usr/local/lib/python3.10/dist-packages (from axolotl==0.3.0) (1.2.2)\n", - "Collecting pynvml (from axolotl==0.3.0)\n", - " Downloading pynvml-11.5.0-py3-none-any.whl (53 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m53.1/53.1 kB\u001b[0m \u001b[31m6.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting art (from axolotl==0.3.0)\n", - " Downloading art-6.1-py3-none-any.whl (599 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m599.8/599.8 kB\u001b[0m \u001b[31m38.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting fschat==0.2.29 (from axolotl==0.3.0)\n", - " Downloading fschat-0.2.29-py3-none-any.whl (200 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m200.7/200.7 kB\u001b[0m \u001b[31m21.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio (from axolotl==0.3.0)\n", - " Downloading gradio-4.4.1-py3-none-any.whl (15.4 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.4/15.4 MB\u001b[0m \u001b[31m70.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: tensorboard in /usr/local/lib/python3.10/dist-packages (from axolotl==0.3.0) (2.14.1)\n", - "Collecting s3fs (from axolotl==0.3.0)\n", - " Downloading s3fs-2023.10.0-py3-none-any.whl (28 kB)\n", - "Requirement already satisfied: gcsfs in /usr/local/lib/python3.10/dist-packages (from axolotl==0.3.0) (2023.6.0)\n", - "Collecting flash-attn>=2.3.0 (from axolotl==0.3.0)\n", - " Downloading 
flash_attn-2.3.3.tar.gz (2.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.3/2.3 MB\u001b[0m \u001b[31m81.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", - "Collecting deepspeed (from axolotl==0.3.0)\n", - " Downloading deepspeed-0.12.3.tar.gz (1.2 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.2/1.2 MB\u001b[0m \u001b[31m84.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", - "Requirement already satisfied: psutil in /usr/local/lib/python3.10/dist-packages (from accelerate==0.24.1->axolotl==0.3.0) (5.9.5)\n", - "Requirement already satisfied: torch>=1.10.0 in /usr/local/lib/python3.10/dist-packages (from accelerate==0.24.1->axolotl==0.3.0) (2.1.0+cu118)\n", - "Requirement already satisfied: huggingface-hub in /usr/local/lib/python3.10/dist-packages (from accelerate==0.24.1->axolotl==0.3.0) (0.19.3)\n", - "Collecting rouge (from auto-gptq==0.5.1->axolotl==0.3.0)\n", - " Downloading rouge-1.0.1-py3-none-any.whl (13 kB)\n", - "Collecting gekko (from auto-gptq==0.5.1->axolotl==0.3.0)\n", - " Downloading gekko-1.0.6-py3-none-any.whl (12.2 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m12.2/12.2 MB\u001b[0m \u001b[31m91.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: safetensors in /usr/local/lib/python3.10/dist-packages (from auto-gptq==0.5.1->axolotl==0.3.0) (0.4.0)\n", - "Requirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from auto-gptq==0.5.1->axolotl==0.3.0) (4.66.1)\n", - "Requirement already satisfied: pandas>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from bert-score==0.3.13->axolotl==0.3.0) (1.5.3)\n", - "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from bert-score==0.3.13->axolotl==0.3.0) (2.31.0)\n", - "Requirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (from bert-score==0.3.13->axolotl==0.3.0) (3.7.1)\n", - "Collecting dill (from evaluate==0.4.0->axolotl==0.3.0)\n", - " Downloading dill-0.3.7-py3-none-any.whl (115 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m115.3/115.3 kB\u001b[0m \u001b[31m16.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: xxhash in /usr/local/lib/python3.10/dist-packages (from evaluate==0.4.0->axolotl==0.3.0) (3.4.1)\n", - "Collecting multiprocess (from evaluate==0.4.0->axolotl==0.3.0)\n", - " Downloading multiprocess-0.70.15-py310-none-any.whl (134 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m134.8/134.8 kB\u001b[0m \u001b[31m18.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: fsspec[http]>=2021.05.0 in /usr/local/lib/python3.10/dist-packages (from evaluate==0.4.0->axolotl==0.3.0) (2023.6.0)\n", - "Collecting responses<0.19 (from evaluate==0.4.0->axolotl==0.3.0)\n", - " Downloading responses-0.18.0-py3-none-any.whl (38 kB)\n", - "Requirement already satisfied: aiohttp in /usr/local/lib/python3.10/dist-packages (from fschat==0.2.29->axolotl==0.3.0) (3.8.6)\n", - "Collecting fastapi (from fschat==0.2.29->axolotl==0.3.0)\n", - " Downloading fastapi-0.104.1-py3-none-any.whl (92 kB)\n", - "\u001b[2K 
\u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m92.9/92.9 kB\u001b[0m \u001b[31m14.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting httpx (from fschat==0.2.29->axolotl==0.3.0)\n", - " Downloading httpx-0.25.1-py3-none-any.whl (75 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m75.0/75.0 kB\u001b[0m \u001b[31m10.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting markdown2[all] (from fschat==0.2.29->axolotl==0.3.0)\n", - " Downloading markdown2-2.4.10-py2.py3-none-any.whl (39 kB)\n", - "Collecting nh3 (from fschat==0.2.29->axolotl==0.3.0)\n", - " Downloading nh3-0.2.14-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.7 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m82.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: prompt-toolkit>=3.0.0 in /usr/local/lib/python3.10/dist-packages (from fschat==0.2.29->axolotl==0.3.0) (3.0.41)\n", - "Requirement already satisfied: pydantic<2,>=1 in /usr/local/lib/python3.10/dist-packages (from fschat==0.2.29->axolotl==0.3.0) (1.10.13)\n", - "Requirement already satisfied: rich>=10.0.0 in /usr/local/lib/python3.10/dist-packages (from fschat==0.2.29->axolotl==0.3.0) (13.7.0)\n", - "Collecting shortuuid (from fschat==0.2.29->axolotl==0.3.0)\n", - " Downloading shortuuid-1.0.11-py3-none-any.whl (10 kB)\n", - "Collecting tiktoken (from fschat==0.2.29->axolotl==0.3.0)\n", - " Downloading tiktoken-0.5.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (2.0 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m76.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting uvicorn (from fschat==0.2.29->axolotl==0.3.0)\n", - " Downloading uvicorn-0.24.0.post1-py3-none-any.whl (59 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m59.7/59.7 kB\u001b[0m \u001b[31m8.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting coloredlogs (from optimum==1.13.2->axolotl==0.3.0)\n", - " Downloading coloredlogs-15.0.1-py2.py3-none-any.whl (46 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m46.0/46.0 kB\u001b[0m \u001b[31m6.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from optimum==1.13.2->axolotl==0.3.0) (1.12)\n", - "Requirement already satisfied: transformers[sentencepiece]>=4.26.0 in /usr/local/lib/python3.10/dist-packages (from optimum==1.13.2->axolotl==0.3.0) (4.35.2)\n", - "Requirement already satisfied: absl-py in /usr/local/lib/python3.10/dist-packages (from rouge-score==0.1.2->axolotl==0.3.0) (1.4.0)\n", - "Requirement already satisfied: nltk in /usr/local/lib/python3.10/dist-packages (from rouge-score==0.1.2->axolotl==0.3.0) (3.8.1)\n", - "Requirement already satisfied: six>=1.14.0 in /usr/local/lib/python3.10/dist-packages (from rouge-score==0.1.2->axolotl==0.3.0) (1.16.0)\n", - "Requirement already satisfied: joblib>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from scikit-learn==1.2.2->axolotl==0.3.0) (1.3.2)\n", - "Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from scikit-learn==1.2.2->axolotl==0.3.0) (3.2.0)\n", - "Requirement already satisfied: filelock in 
/usr/local/lib/python3.10/dist-packages (from transformers==4.35.1->axolotl==0.3.0) (3.13.1)\n", - "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.10/dist-packages (from transformers==4.35.1->axolotl==0.3.0) (2023.6.3)\n", - "Collecting tokenizers<0.15,>=0.14 (from transformers==4.35.1->axolotl==0.3.0)\n", - " Downloading tokenizers-0.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.8 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.8/3.8 MB\u001b[0m \u001b[31m116.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting torch>=1.10.0 (from accelerate==0.24.1->axolotl==0.3.0)\n", - " Downloading torch-2.0.1-cp310-cp310-manylinux1_x86_64.whl (619.9 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m619.9/619.9 MB\u001b[0m \u001b[31m2.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate==0.24.1->axolotl==0.3.0) (4.5.0)\n", - "Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate==0.24.1->axolotl==0.3.0) (3.2.1)\n", - "Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate==0.24.1->axolotl==0.3.0) (3.1.2)\n", - "Collecting nvidia-cuda-nvrtc-cu11==11.7.99 (from torch>=1.10.0->accelerate==0.24.1->axolotl==0.3.0)\n", - " Downloading nvidia_cuda_nvrtc_cu11-11.7.99-2-py3-none-manylinux1_x86_64.whl (21.0 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m21.0/21.0 MB\u001b[0m \u001b[31m63.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting nvidia-cuda-runtime-cu11==11.7.99 (from torch>=1.10.0->accelerate==0.24.1->axolotl==0.3.0)\n", - " Downloading nvidia_cuda_runtime_cu11-11.7.99-py3-none-manylinux1_x86_64.whl (849 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m849.3/849.3 kB\u001b[0m \u001b[31m58.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting nvidia-cuda-cupti-cu11==11.7.101 (from torch>=1.10.0->accelerate==0.24.1->axolotl==0.3.0)\n", - " Downloading nvidia_cuda_cupti_cu11-11.7.101-py3-none-manylinux1_x86_64.whl (11.8 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m11.8/11.8 MB\u001b[0m \u001b[31m79.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting nvidia-cudnn-cu11==8.5.0.96 (from torch>=1.10.0->accelerate==0.24.1->axolotl==0.3.0)\n", - " Downloading nvidia_cudnn_cu11-8.5.0.96-2-py3-none-manylinux1_x86_64.whl (557.1 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m557.1/557.1 MB\u001b[0m \u001b[31m3.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting nvidia-cublas-cu11==11.10.3.66 (from torch>=1.10.0->accelerate==0.24.1->axolotl==0.3.0)\n", - " Downloading nvidia_cublas_cu11-11.10.3.66-py3-none-manylinux1_x86_64.whl (317.1 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m317.1/317.1 MB\u001b[0m \u001b[31m4.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting nvidia-cufft-cu11==10.9.0.58 (from torch>=1.10.0->accelerate==0.24.1->axolotl==0.3.0)\n", - " Downloading nvidia_cufft_cu11-10.9.0.58-py3-none-manylinux1_x86_64.whl (168.4 MB)\n", - "\u001b[2K 
\u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m168.4/168.4 MB\u001b[0m \u001b[31m7.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting nvidia-curand-cu11==10.2.10.91 (from torch>=1.10.0->accelerate==0.24.1->axolotl==0.3.0)\n", - " Downloading nvidia_curand_cu11-10.2.10.91-py3-none-manylinux1_x86_64.whl (54.6 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m54.6/54.6 MB\u001b[0m \u001b[31m12.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting nvidia-cusolver-cu11==11.4.0.1 (from torch>=1.10.0->accelerate==0.24.1->axolotl==0.3.0)\n", - " Downloading nvidia_cusolver_cu11-11.4.0.1-2-py3-none-manylinux1_x86_64.whl (102.6 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m102.6/102.6 MB\u001b[0m \u001b[31m8.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting nvidia-cusparse-cu11==11.7.4.91 (from torch>=1.10.0->accelerate==0.24.1->axolotl==0.3.0)\n", - " Downloading nvidia_cusparse_cu11-11.7.4.91-py3-none-manylinux1_x86_64.whl (173.2 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m173.2/173.2 MB\u001b[0m \u001b[31m6.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting nvidia-nccl-cu11==2.14.3 (from torch>=1.10.0->accelerate==0.24.1->axolotl==0.3.0)\n", - " Downloading nvidia_nccl_cu11-2.14.3-py3-none-manylinux1_x86_64.whl (177.1 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m177.1/177.1 MB\u001b[0m \u001b[31m6.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting nvidia-nvtx-cu11==11.7.91 (from torch>=1.10.0->accelerate==0.24.1->axolotl==0.3.0)\n", - " Downloading nvidia_nvtx_cu11-11.7.91-py3-none-manylinux1_x86_64.whl (98 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m98.6/98.6 kB\u001b[0m \u001b[31m13.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting triton==2.0.0 (from torch>=1.10.0->accelerate==0.24.1->axolotl==0.3.0)\n", - " Downloading triton-2.0.0-1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (63.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m63.3/63.3 MB\u001b[0m \u001b[31m9.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: setuptools in /usr/local/lib/python3.10/dist-packages (from nvidia-cublas-cu11==11.10.3.66->torch>=1.10.0->accelerate==0.24.1->axolotl==0.3.0) (67.7.2)\n", - "Requirement already satisfied: wheel in /usr/local/lib/python3.10/dist-packages (from nvidia-cublas-cu11==11.10.3.66->torch>=1.10.0->accelerate==0.24.1->axolotl==0.3.0) (0.41.3)\n", - "Requirement already satisfied: cmake in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->torch>=1.10.0->accelerate==0.24.1->axolotl==0.3.0) (3.27.7)\n", - "Collecting lit (from triton==2.0.0->torch>=1.10.0->accelerate==0.24.1->axolotl==0.3.0)\n", - " Downloading lit-17.0.5.tar.gz (153 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m153.0/153.0 kB\u001b[0m \u001b[31m20.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n", - " Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n", - " Installing backend dependencies ... \u001b[?25l\u001b[?25hdone\n", - " Preparing metadata (pyproject.toml) ... 
\u001b[?25l\u001b[?25hdone\n", - "Requirement already satisfied: pyarrow>=8.0.0 in /usr/local/lib/python3.10/dist-packages (from datasets>=2.14.0->axolotl==0.3.0) (9.0.0)\n", - "Collecting pyarrow-hotfix (from datasets>=2.14.0->axolotl==0.3.0)\n", - " Downloading pyarrow_hotfix-0.5-py3-none-any.whl (7.8 kB)\n", - "Collecting ninja (from flash-attn>=2.3.0->axolotl==0.3.0)\n", - " Downloading ninja-1.11.1.1-py2.py3-none-manylinux1_x86_64.manylinux_2_5_x86_64.whl (307 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m307.2/307.2 kB\u001b[0m \u001b[31m37.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting hjson (from deepspeed->axolotl==0.3.0)\n", - " Downloading hjson-3.1.0-py3-none-any.whl (54 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m54.0/54.0 kB\u001b[0m \u001b[31m9.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: py-cpuinfo in /usr/local/lib/python3.10/dist-packages (from deepspeed->axolotl==0.3.0) (9.0.0)\n", - "Requirement already satisfied: termcolor in /usr/local/lib/python3.10/dist-packages (from fire->axolotl==0.3.0) (2.3.0)\n", - "Requirement already satisfied: decorator>4.1.2 in /usr/local/lib/python3.10/dist-packages (from gcsfs->axolotl==0.3.0) (4.4.2)\n", - "Requirement already satisfied: google-auth>=1.2 in /usr/local/lib/python3.10/dist-packages (from gcsfs->axolotl==0.3.0) (2.17.3)\n", - "Requirement already satisfied: google-auth-oauthlib in /usr/local/lib/python3.10/dist-packages (from gcsfs->axolotl==0.3.0) (1.0.0)\n", - "Requirement already satisfied: google-cloud-storage in /usr/local/lib/python3.10/dist-packages (from gcsfs->axolotl==0.3.0) (2.8.0)\n", - "Collecting aiofiles<24.0,>=22.0 (from gradio->axolotl==0.3.0)\n", - " Downloading aiofiles-23.2.1-py3-none-any.whl (15 kB)\n", - "Requirement already satisfied: altair<6.0,>=4.2.0 in /usr/local/lib/python3.10/dist-packages (from gradio->axolotl==0.3.0) (4.2.2)\n", - "Collecting ffmpy (from gradio->axolotl==0.3.0)\n", - " Downloading ffmpy-0.3.1.tar.gz (5.5 kB)\n", - " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", - "Collecting gradio-client==0.7.0 (from gradio->axolotl==0.3.0)\n", - " Downloading gradio_client-0.7.0-py3-none-any.whl (302 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m302.7/302.7 kB\u001b[0m \u001b[31m37.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: importlib-resources<7.0,>=1.3 in /usr/local/lib/python3.10/dist-packages (from gradio->axolotl==0.3.0) (6.1.1)\n", - "Requirement already satisfied: markupsafe~=2.0 in /usr/local/lib/python3.10/dist-packages (from gradio->axolotl==0.3.0) (2.1.3)\n", - "Collecting orjson~=3.0 (from gradio->axolotl==0.3.0)\n", - " Downloading orjson-3.9.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (138 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m138.7/138.7 kB\u001b[0m \u001b[31m20.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: pillow<11.0,>=8.0 in /usr/local/lib/python3.10/dist-packages (from gradio->axolotl==0.3.0) (9.4.0)\n", - "INFO: pip is looking at multiple versions of gradio to determine which version is compatible with other requirements. 
This could take a while.\n", - "Collecting gradio (from axolotl==0.3.0)\n", - " Downloading gradio-4.4.0-py3-none-any.whl (15.4 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.4/15.4 MB\u001b[0m \u001b[31m84.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-4.3.0-py3-none-any.whl (15.4 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.4/15.4 MB\u001b[0m \u001b[31m76.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-4.2.0-py3-none-any.whl (15.4 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.4/15.4 MB\u001b[0m \u001b[31m81.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-4.1.2-py3-none-any.whl (15.4 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.4/15.4 MB\u001b[0m \u001b[31m83.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-4.1.1-py3-none-any.whl (15.4 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.4/15.4 MB\u001b[0m \u001b[31m78.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-4.1.0-py3-none-any.whl (15.4 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.4/15.4 MB\u001b[0m \u001b[31m17.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-4.0.2-py3-none-any.whl (25.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m25.3/25.3 MB\u001b[0m \u001b[31m49.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hINFO: pip is looking at multiple versions of gradio to determine which version is compatible with other requirements. 
This could take a while.\n", - " Downloading gradio-4.0.1-py3-none-any.whl (25.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m25.3/25.3 MB\u001b[0m \u001b[31m56.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-4.0.0-py3-none-any.whl (25.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m25.3/25.3 MB\u001b[0m \u001b[31m15.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.50.2-py3-none-any.whl (20.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.3/20.3 MB\u001b[0m \u001b[31m66.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio-client==0.6.1 (from gradio->axolotl==0.3.0)\n", - " Downloading gradio_client-0.6.1-py3-none-any.whl (299 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m299.2/299.2 kB\u001b[0m \u001b[31m33.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting pydub (from gradio->axolotl==0.3.0)\n", - " Downloading pydub-0.25.1-py2.py3-none-any.whl (32 kB)\n", - "Collecting python-multipart (from gradio->axolotl==0.3.0)\n", - " Downloading python_multipart-0.0.6-py3-none-any.whl (45 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m45.7/45.7 kB\u001b[0m \u001b[31m7.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting semantic-version~=2.0 (from gradio->axolotl==0.3.0)\n", - " Downloading semantic_version-2.10.0-py2.py3-none-any.whl (15 kB)\n", - "Collecting websockets<12.0,>=10.0 (from gradio->axolotl==0.3.0)\n", - " Downloading websockets-11.0.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (129 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m129.9/129.9 kB\u001b[0m \u001b[31m18.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: llvmlite<0.42,>=0.41.0dev0 in /usr/local/lib/python3.10/dist-packages (from numba->axolotl==0.3.0) (0.41.1)\n", - "Collecting aiobotocore~=2.7.0 (from s3fs->axolotl==0.3.0)\n", - " Downloading aiobotocore-2.7.0-py3-none-any.whl (73 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m73.5/73.5 kB\u001b[0m \u001b[31m10.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hINFO: pip is looking at multiple versions of s3fs to determine which version is compatible with other requirements. 
This could take a while.\n", - "Collecting s3fs (from axolotl==0.3.0)\n", - " Downloading s3fs-2023.9.2-py3-none-any.whl (28 kB)\n", - "Collecting aiobotocore~=2.5.4 (from s3fs->axolotl==0.3.0)\n", - " Downloading aiobotocore-2.5.4-py3-none-any.whl (73 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m73.4/73.4 kB\u001b[0m \u001b[31m8.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting s3fs (from axolotl==0.3.0)\n", - " Downloading s3fs-2023.9.1-py3-none-any.whl (28 kB)\n", - " Downloading s3fs-2023.9.0-py3-none-any.whl (28 kB)\n", - " Downloading s3fs-2023.6.0-py3-none-any.whl (28 kB)\n", - "Requirement already satisfied: grpcio>=1.48.2 in /usr/local/lib/python3.10/dist-packages (from tensorboard->axolotl==0.3.0) (1.59.2)\n", - "Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.10/dist-packages (from tensorboard->axolotl==0.3.0) (3.5.1)\n", - "Requirement already satisfied: protobuf>=3.19.6 in /usr/local/lib/python3.10/dist-packages (from tensorboard->axolotl==0.3.0) (3.20.3)\n", - "Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard->axolotl==0.3.0) (0.7.2)\n", - "Requirement already satisfied: werkzeug>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from tensorboard->axolotl==0.3.0) (3.0.1)\n", - "Requirement already satisfied: Click!=8.0.0,>=7.1 in /usr/local/lib/python3.10/dist-packages (from wandb->axolotl==0.3.0) (8.1.7)\n", - "Collecting GitPython!=3.1.29,>=1.0.0 (from wandb->axolotl==0.3.0)\n", - " Downloading GitPython-3.1.40-py3-none-any.whl (190 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m190.6/190.6 kB\u001b[0m \u001b[31m26.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting sentry-sdk>=1.0.0 (from wandb->axolotl==0.3.0)\n", - " Downloading sentry_sdk-1.35.0-py2.py3-none-any.whl (248 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m248.6/248.6 kB\u001b[0m \u001b[31m27.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting docker-pycreds>=0.4.0 (from wandb->axolotl==0.3.0)\n", - " Downloading docker_pycreds-0.4.0-py2.py3-none-any.whl (9.0 kB)\n", - "Collecting setproctitle (from wandb->axolotl==0.3.0)\n", - " Downloading setproctitle-1.3.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (30 kB)\n", - "Requirement already satisfied: appdirs>=1.4.3 in /usr/local/lib/python3.10/dist-packages (from wandb->axolotl==0.3.0) (1.4.4)\n", - "Collecting botocore<1.31.18,>=1.31.17 (from aiobotocore~=2.5.4->s3fs->axolotl==0.3.0)\n", - " Downloading botocore-1.31.17-py3-none-any.whl (11.1 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m11.1/11.1 MB\u001b[0m \u001b[31m122.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: wrapt<2.0.0,>=1.10.10 in /usr/local/lib/python3.10/dist-packages (from aiobotocore~=2.5.4->s3fs->axolotl==0.3.0) (1.14.1)\n", - "Collecting aioitertools<1.0.0,>=0.5.1 (from aiobotocore~=2.5.4->s3fs->axolotl==0.3.0)\n", - " Downloading aioitertools-0.11.0-py3-none-any.whl (23 kB)\n", - "Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->fschat==0.2.29->axolotl==0.3.0) (23.1.0)\n", - "Requirement already satisfied: charset-normalizer<4.0,>=2.0 in /usr/local/lib/python3.10/dist-packages (from 
aiohttp->fschat==0.2.29->axolotl==0.3.0) (3.3.2)\n", - "Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.10/dist-packages (from aiohttp->fschat==0.2.29->axolotl==0.3.0) (6.0.4)\n", - "Requirement already satisfied: async-timeout<5.0,>=4.0.0a3 in /usr/local/lib/python3.10/dist-packages (from aiohttp->fschat==0.2.29->axolotl==0.3.0) (4.0.3)\n", - "Requirement already satisfied: yarl<2.0,>=1.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->fschat==0.2.29->axolotl==0.3.0) (1.9.2)\n", - "Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from aiohttp->fschat==0.2.29->axolotl==0.3.0) (1.4.0)\n", - "Requirement already satisfied: aiosignal>=1.1.2 in /usr/local/lib/python3.10/dist-packages (from aiohttp->fschat==0.2.29->axolotl==0.3.0) (1.3.1)\n", - "Requirement already satisfied: entrypoints in /usr/local/lib/python3.10/dist-packages (from altair<6.0,>=4.2.0->gradio->axolotl==0.3.0) (0.4)\n", - "Requirement already satisfied: jsonschema>=3.0 in /usr/local/lib/python3.10/dist-packages (from altair<6.0,>=4.2.0->gradio->axolotl==0.3.0) (4.19.2)\n", - "Requirement already satisfied: toolz in /usr/local/lib/python3.10/dist-packages (from altair<6.0,>=4.2.0->gradio->axolotl==0.3.0) (0.12.0)\n", - "Collecting gitdb<5,>=4.0.1 (from GitPython!=3.1.29,>=1.0.0->wandb->axolotl==0.3.0)\n", - " Downloading gitdb-4.0.11-py3-none-any.whl (62 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.7/62.7 kB\u001b[0m \u001b[31m10.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from google-auth>=1.2->gcsfs->axolotl==0.3.0) (5.3.2)\n", - "Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.10/dist-packages (from google-auth>=1.2->gcsfs->axolotl==0.3.0) (0.3.0)\n", - "Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.10/dist-packages (from google-auth>=1.2->gcsfs->axolotl==0.3.0) (4.9)\n", - "Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.10/dist-packages (from google-auth-oauthlib->gcsfs->axolotl==0.3.0) (1.3.1)\n", - "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->bert-score==0.3.13->axolotl==0.3.0) (1.2.0)\n", - "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib->bert-score==0.3.13->axolotl==0.3.0) (0.12.1)\n", - "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->bert-score==0.3.13->axolotl==0.3.0) (4.44.3)\n", - "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->bert-score==0.3.13->axolotl==0.3.0) (1.4.5)\n", - "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->bert-score==0.3.13->axolotl==0.3.0) (3.1.1)\n", - "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib->bert-score==0.3.13->axolotl==0.3.0) (2.8.2)\n", - "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas>=1.0.1->bert-score==0.3.13->axolotl==0.3.0) (2023.3.post1)\n", - "Requirement already satisfied: wcwidth in /usr/local/lib/python3.10/dist-packages (from prompt-toolkit>=3.0.0->fschat==0.2.29->axolotl==0.3.0) (0.2.10)\n", - "Requirement already 
satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->bert-score==0.3.13->axolotl==0.3.0) (3.4)\n", - "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->bert-score==0.3.13->axolotl==0.3.0) (2.0.7)\n", - "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->bert-score==0.3.13->axolotl==0.3.0) (2023.7.22)\n", - "Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.10/dist-packages (from rich>=10.0.0->fschat==0.2.29->axolotl==0.3.0) (3.0.0)\n", - "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.10/dist-packages (from rich>=10.0.0->fschat==0.2.29->axolotl==0.3.0) (2.16.1)\n", - "INFO: pip is looking at multiple versions of tokenizers to determine which version is compatible with other requirements. This could take a while.\n", - "Collecting tokenizers<0.15,>=0.14 (from transformers==4.35.1->axolotl==0.3.0)\n", - " Downloading tokenizers-0.14.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.8 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.8/3.8 MB\u001b[0m \u001b[31m103.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hINFO: This is taking longer than usual. You might need to provide the dependency resolver with stricter constraints to reduce runtime. See https://pip.pypa.io/warnings/backtracking for guidance. If you want to abort this run, press Ctrl + C.\n", - "Collecting gradio (from axolotl==0.3.0)\n", - " Downloading gradio-3.50.1-py3-none-any.whl (20.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.3/20.3 MB\u001b[0m \u001b[31m88.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.50.0-py3-none-any.whl (20.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.3/20.3 MB\u001b[0m \u001b[31m37.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.49.0-py3-none-any.whl (20.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.3/20.3 MB\u001b[0m \u001b[31m57.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hINFO: pip is looking at multiple versions of tokenizers to determine which version is compatible with other requirements. 
This could take a while.\n", - " Downloading gradio-3.48.0-py3-none-any.whl (20.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.3/20.3 MB\u001b[0m \u001b[31m63.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.47.1-py3-none-any.whl (20.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.3/20.3 MB\u001b[0m \u001b[31m18.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio-client==0.6.0 (from gradio->axolotl==0.3.0)\n", - " Downloading gradio_client-0.6.0-py3-none-any.whl (298 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m298.8/298.8 kB\u001b[0m \u001b[31m37.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio (from axolotl==0.3.0)\n", - " Downloading gradio-3.47.0-py3-none-any.whl (20.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.3/20.3 MB\u001b[0m \u001b[31m98.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hINFO: This is taking longer than usual. You might need to provide the dependency resolver with stricter constraints to reduce runtime. See https://pip.pypa.io/warnings/backtracking for guidance. If you want to abort this run, press Ctrl + C.\n", - " Downloading gradio-3.46.1-py3-none-any.whl (20.2 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.2/20.2 MB\u001b[0m \u001b[31m95.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio-client==0.5.3 (from gradio->axolotl==0.3.0)\n", - " Downloading gradio_client-0.5.3-py3-none-any.whl (298 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m298.4/298.4 kB\u001b[0m \u001b[31m39.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio (from axolotl==0.3.0)\n", - " Downloading gradio-3.46.0-py3-none-any.whl (20.2 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.2/20.2 MB\u001b[0m \u001b[31m95.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.45.2-py3-none-any.whl (20.2 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.2/20.2 MB\u001b[0m \u001b[31m28.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.45.1-py3-none-any.whl (20.2 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.2/20.2 MB\u001b[0m \u001b[31m57.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio-client==0.5.2 (from gradio->axolotl==0.3.0)\n", - " Downloading gradio_client-0.5.2-py3-none-any.whl (298 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m298.3/298.3 kB\u001b[0m \u001b[31m37.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio (from axolotl==0.3.0)\n", - " Downloading gradio-3.45.0-py3-none-any.whl (20.2 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.2/20.2 MB\u001b[0m \u001b[31m96.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.44.4-py3-none-any.whl (20.2 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.2/20.2 MB\u001b[0m \u001b[31m22.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting 
gradio-client==0.5.1 (from gradio->axolotl==0.3.0)\n", - " Downloading gradio_client-0.5.1-py3-none-any.whl (298 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m298.2/298.2 kB\u001b[0m \u001b[31m19.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio (from axolotl==0.3.0)\n", - " Downloading gradio-3.44.3-py3-none-any.whl (20.2 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.2/20.2 MB\u001b[0m \u001b[31m96.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio-client==0.5.0 (from gradio->axolotl==0.3.0)\n", - " Downloading gradio_client-0.5.0-py3-none-any.whl (298 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m298.2/298.2 kB\u001b[0m \u001b[31m37.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio (from axolotl==0.3.0)\n", - " Downloading gradio-3.44.2-py3-none-any.whl (20.2 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.2/20.2 MB\u001b[0m \u001b[31m91.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.44.1-py3-none-any.whl (20.2 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.2/20.2 MB\u001b[0m \u001b[31m104.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.44.0-py3-none-any.whl (20.2 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.2/20.2 MB\u001b[0m \u001b[31m18.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.43.2-py3-none-any.whl (20.1 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.1/20.1 MB\u001b[0m \u001b[31m67.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.43.1-py3-none-any.whl (20.1 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.1/20.1 MB\u001b[0m \u001b[31m64.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.43.0-py3-none-any.whl (20.1 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.1/20.1 MB\u001b[0m \u001b[31m75.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.42.0-py3-none-any.whl (20.1 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.1/20.1 MB\u001b[0m \u001b[31m16.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.41.2-py3-none-any.whl (20.1 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.1/20.1 MB\u001b[0m \u001b[31m75.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.41.1-py3-none-any.whl (20.1 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.1/20.1 MB\u001b[0m \u001b[31m40.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.41.0-py3-none-any.whl (20.1 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.1/20.1 MB\u001b[0m \u001b[31m25.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.40.1-py3-none-any.whl (20.0 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.0/20.0 MB\u001b[0m \u001b[31m53.2 
MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting mdit-py-plugins<=0.3.3 (from gradio->axolotl==0.3.0)\n", - " Downloading mdit_py_plugins-0.3.3-py3-none-any.whl (50 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m50.5/50.5 kB\u001b[0m \u001b[31m6.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio-client>=0.4.0 (from gradio->axolotl==0.3.0)\n", - " Downloading gradio_client-0.4.0-py3-none-any.whl (297 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m297.4/297.4 kB\u001b[0m \u001b[31m32.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio (from axolotl==0.3.0)\n", - " Downloading gradio-3.40.0-py3-none-any.whl (20.0 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.0/20.0 MB\u001b[0m \u001b[31m54.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.39.0-py3-none-any.whl (19.9 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m19.9/19.9 MB\u001b[0m \u001b[31m67.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio-client>=0.3.0 (from gradio->axolotl==0.3.0)\n", - " Downloading gradio_client-0.3.0-py3-none-any.whl (294 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m294.2/294.2 kB\u001b[0m \u001b[31m37.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio (from axolotl==0.3.0)\n", - " Downloading gradio-3.38.0-py3-none-any.whl (19.8 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m19.8/19.8 MB\u001b[0m \u001b[31m18.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio-client>=0.2.10 (from gradio->axolotl==0.3.0)\n", - " Downloading gradio_client-0.2.10-py3-none-any.whl (288 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m289.0/289.0 kB\u001b[0m \u001b[31m35.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio (from axolotl==0.3.0)\n", - " Downloading gradio-3.37.0-py3-none-any.whl (19.8 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m19.8/19.8 MB\u001b[0m \u001b[31m90.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.36.1-py3-none-any.whl (19.8 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m19.8/19.8 MB\u001b[0m \u001b[31m103.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting websockets>=10.0 (from gradio->axolotl==0.3.0)\n", - " Downloading websockets-12.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (130 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m130.2/130.2 kB\u001b[0m \u001b[31m18.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio-client>=0.2.7 (from gradio->axolotl==0.3.0)\n", - " Downloading gradio_client-0.2.9-py3-none-any.whl (288 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m288.8/288.8 kB\u001b[0m \u001b[31m32.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio_client-0.2.8-py3-none-any.whl (288 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m288.8/288.8 
kB\u001b[0m \u001b[31m27.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio_client-0.2.7-py3-none-any.whl (288 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m288.4/288.4 kB\u001b[0m \u001b[31m36.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio (from axolotl==0.3.0)\n", - " Downloading gradio-3.36.0-py3-none-any.whl (19.8 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m19.8/19.8 MB\u001b[0m \u001b[31m74.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.35.2-py3-none-any.whl (19.7 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m19.7/19.7 MB\u001b[0m \u001b[31m22.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.35.1-py3-none-any.whl (19.7 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m19.7/19.7 MB\u001b[0m \u001b[31m52.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.35.0-py3-none-any.whl (19.7 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m19.7/19.7 MB\u001b[0m \u001b[31m93.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.34.0-py3-none-any.whl (20.0 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.0/20.0 MB\u001b[0m \u001b[31m92.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio-client>=0.2.6 (from gradio->axolotl==0.3.0)\n", - " Downloading gradio_client-0.2.6-py3-none-any.whl (288 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m288.3/288.3 kB\u001b[0m \u001b[31m30.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio (from axolotl==0.3.0)\n", - " Downloading gradio-3.33.1-py3-none-any.whl (20.0 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.0/20.0 MB\u001b[0m \u001b[31m18.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio-client>=0.2.4 (from gradio->axolotl==0.3.0)\n", - " Downloading gradio_client-0.2.5-py3-none-any.whl (288 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m288.1/288.1 kB\u001b[0m \u001b[31m34.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio_client-0.2.4-py3-none-any.whl (287 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m287.9/287.9 kB\u001b[0m \u001b[31m30.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio (from axolotl==0.3.0)\n", - " Downloading gradio-3.33.0-py3-none-any.whl (20.0 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.0/20.0 MB\u001b[0m \u001b[31m95.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.32.0-py3-none-any.whl (19.9 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m19.9/19.9 MB\u001b[0m \u001b[31m95.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.31.0-py3-none-any.whl (17.4 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m17.4/17.4 MB\u001b[0m \u001b[31m86.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading 
gradio-3.30.0-py3-none-any.whl (17.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m17.3/17.3 MB\u001b[0m \u001b[31m73.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.29.0-py3-none-any.whl (17.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m17.3/17.3 MB\u001b[0m \u001b[31m15.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio-client>=0.2.1 (from gradio->axolotl==0.3.0)\n", - " Downloading gradio_client-0.2.3-py3-none-any.whl (287 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m287.9/287.9 kB\u001b[0m \u001b[31m32.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio_client-0.2.2-py3-none-any.whl (287 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m287.9/287.9 kB\u001b[0m \u001b[31m33.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio_client-0.2.1-py3-none-any.whl (287 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m287.9/287.9 kB\u001b[0m \u001b[31m34.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio (from axolotl==0.3.0)\n", - " Downloading gradio-3.28.3-py3-none-any.whl (17.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m17.3/17.3 MB\u001b[0m \u001b[31m75.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio-client>=0.1.3 (from gradio->axolotl==0.3.0)\n", - " Downloading gradio_client-0.2.0-py3-none-any.whl (287 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m287.9/287.9 kB\u001b[0m \u001b[31m30.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio_client-0.1.4-py3-none-any.whl (286 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m286.7/286.7 kB\u001b[0m \u001b[31m32.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio_client-0.1.3-py3-none-any.whl (286 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m286.2/286.2 kB\u001b[0m \u001b[31m31.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio (from axolotl==0.3.0)\n", - " Downloading gradio-3.28.2-py3-none-any.whl (17.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m17.3/17.3 MB\u001b[0m \u001b[31m68.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.28.1-py3-none-any.whl (17.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m17.3/17.3 MB\u001b[0m \u001b[31m64.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.28.0-py3-none-any.whl (17.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m17.3/17.3 MB\u001b[0m \u001b[31m98.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.27.0-py3-none-any.whl (17.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m17.3/17.3 MB\u001b[0m \u001b[31m33.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.26.0-py3-none-any.whl (17.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m17.3/17.3 
MB\u001b[0m \u001b[31m101.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio-client==0.1.2 (from gradio->axolotl==0.3.0)\n", - " Downloading gradio_client-0.1.2-py3-none-any.whl (286 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m286.1/286.1 kB\u001b[0m \u001b[31m36.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio (from axolotl==0.3.0)\n", - " Downloading gradio-3.25.0-py3-none-any.whl (17.5 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m17.5/17.5 MB\u001b[0m \u001b[31m101.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio-client>=0.0.8 (from gradio->axolotl==0.3.0)\n", - " Downloading gradio_client-0.1.0-py3-none-any.whl (24 kB)\n", - " Downloading gradio_client-0.0.10-py3-none-any.whl (23 kB)\n", - " Downloading gradio_client-0.0.9-py3-none-any.whl (23 kB)\n", - " Downloading gradio_client-0.0.8-py3-none-any.whl (20 kB)\n", - "Collecting gradio (from axolotl==0.3.0)\n", - " Downloading gradio-3.24.1-py3-none-any.whl (15.7 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.7/15.7 MB\u001b[0m \u001b[31m86.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting gradio-client>=0.0.5 (from gradio->axolotl==0.3.0)\n", - " Downloading gradio_client-0.0.7-py3-none-any.whl (14 kB)\n", - " Downloading gradio_client-0.0.6-py3-none-any.whl (14 kB)\n", - " Downloading gradio_client-0.0.5-py3-none-any.whl (12 kB)\n", - "Collecting gradio (from axolotl==0.3.0)\n", - " Downloading gradio-3.24.0-py3-none-any.whl (15.7 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.7/15.7 MB\u001b[0m \u001b[31m15.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.23.0-py3-none-any.whl (15.8 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.8/15.8 MB\u001b[0m \u001b[31m107.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.22.1-py3-none-any.whl (15.8 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.8/15.8 MB\u001b[0m \u001b[31m91.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.22.0-py3-none-any.whl (15.8 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.8/15.8 MB\u001b[0m \u001b[31m90.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.21.0-py3-none-any.whl (15.8 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.8/15.8 MB\u001b[0m \u001b[31m102.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Downloading gradio-3.20.1-py3-none-any.whl (14.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m14.3/14.3 MB\u001b[0m \u001b[31m106.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting pycryptodome (from gradio->axolotl==0.3.0)\n", - " Downloading pycryptodome-3.19.0-cp35-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (2.1 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.1/2.1 MB\u001b[0m \u001b[31m92.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting datasets>=2.14.0 (from axolotl==0.3.0)\n", - " Downloading datasets-2.14.7-py3-none-any.whl (520 kB)\n", - "\u001b[2K 
\u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m520.4/520.4 kB\u001b[0m \u001b[31m53.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting huggingface-hub (from accelerate==0.24.1->axolotl==0.3.0)\n", - " Downloading huggingface_hub-0.17.3-py3-none-any.whl (295 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m295.0/295.0 kB\u001b[0m \u001b[31m32.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hINFO: pip is looking at multiple versions of s3fs to determine which version is compatible with other requirements. This could take a while.\n", - "INFO: pip is looking at multiple versions of transformers[sentencepiece] to determine which version is compatible with other requirements. This could take a while.\n", - "Collecting h11>=0.8 (from uvicorn->fschat==0.2.29->axolotl==0.3.0)\n", - " Downloading h11-0.14.0-py3-none-any.whl (58 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m58.3/58.3 kB\u001b[0m \u001b[31m8.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting humanfriendly>=9.1 (from coloredlogs->optimum==1.13.2->axolotl==0.3.0)\n", - " Downloading humanfriendly-10.0-py2.py3-none-any.whl (86 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m86.8/86.8 kB\u001b[0m \u001b[31m319.1 kB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: anyio<4.0.0,>=3.7.1 in /usr/local/lib/python3.10/dist-packages (from fastapi->fschat==0.2.29->axolotl==0.3.0) (3.7.1)\n", - "Collecting starlette<0.28.0,>=0.27.0 (from fastapi->fschat==0.2.29->axolotl==0.3.0)\n", - " Downloading starlette-0.27.0-py3-none-any.whl (66 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m67.0/67.0 kB\u001b[0m \u001b[31m8.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting typing-extensions (from torch>=1.10.0->accelerate==0.24.1->axolotl==0.3.0)\n", - " Downloading typing_extensions-4.8.0-py3-none-any.whl (31 kB)\n", - "Requirement already satisfied: google-api-core!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0dev,>=1.31.5 in /usr/local/lib/python3.10/dist-packages (from google-cloud-storage->gcsfs->axolotl==0.3.0) (2.11.1)\n", - "Requirement already satisfied: google-cloud-core<3.0dev,>=2.3.0 in /usr/local/lib/python3.10/dist-packages (from google-cloud-storage->gcsfs->axolotl==0.3.0) (2.3.3)\n", - "Requirement already satisfied: google-resumable-media>=2.3.2 in /usr/local/lib/python3.10/dist-packages (from google-cloud-storage->gcsfs->axolotl==0.3.0) (2.6.0)\n", - "Collecting httpcore (from httpx->fschat==0.2.29->axolotl==0.3.0)\n", - " Downloading httpcore-1.0.2-py3-none-any.whl (76 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m76.9/76.9 kB\u001b[0m \u001b[31m9.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: sniffio in /usr/local/lib/python3.10/dist-packages (from httpx->fschat==0.2.29->axolotl==0.3.0) (1.3.0)\n", - "Collecting wavedrom (from markdown2[all]->fschat==0.2.29->axolotl==0.3.0)\n", - " Downloading wavedrom-2.0.3.post3.tar.gz (137 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m137.7/137.7 kB\u001b[0m \u001b[31m19.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Preparing metadata (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", - "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->optimum==1.13.2->axolotl==0.3.0) (1.3.0)\n", - "Requirement already satisfied: exceptiongroup in /usr/local/lib/python3.10/dist-packages (from anyio<4.0.0,>=3.7.1->fastapi->fschat==0.2.29->axolotl==0.3.0) (1.1.3)\n", - "Collecting jmespath<2.0.0,>=0.7.1 (from botocore<1.31.18,>=1.31.17->aiobotocore~=2.5.4->s3fs->axolotl==0.3.0)\n", - " Downloading jmespath-1.0.1-py3-none-any.whl (20 kB)\n", - "Collecting urllib3<3,>=1.21.1 (from requests->bert-score==0.3.13->axolotl==0.3.0)\n", - " Downloading urllib3-1.26.18-py2.py3-none-any.whl (143 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m143.8/143.8 kB\u001b[0m \u001b[31m19.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting smmap<6,>=3.0.1 (from gitdb<5,>=4.0.1->GitPython!=3.1.29,>=1.0.0->wandb->axolotl==0.3.0)\n", - " Downloading smmap-5.0.1-py3-none-any.whl (24 kB)\n", - "Requirement already satisfied: googleapis-common-protos<2.0.dev0,>=1.56.2 in /usr/local/lib/python3.10/dist-packages (from google-api-core!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0dev,>=1.31.5->google-cloud-storage->gcsfs->axolotl==0.3.0) (1.61.0)\n", - "Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /usr/local/lib/python3.10/dist-packages (from google-resumable-media>=2.3.2->google-cloud-storage->gcsfs->axolotl==0.3.0) (1.5.0)\n", - "Requirement already satisfied: jsonschema-specifications>=2023.03.6 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=3.0->altair<6.0,>=4.2.0->gradio->axolotl==0.3.0) (2023.11.1)\n", - "Requirement already satisfied: referencing>=0.28.4 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=3.0->altair<6.0,>=4.2.0->gradio->axolotl==0.3.0) (0.31.0)\n", - "Requirement already satisfied: rpds-py>=0.7.1 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=3.0->altair<6.0,>=4.2.0->gradio->axolotl==0.3.0) (0.12.0)\n", - "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.10/dist-packages (from markdown-it-py>=2.2.0->rich>=10.0.0->fschat==0.2.29->axolotl==0.3.0) (0.1.2)\n", - "Requirement already satisfied: pyasn1<0.6.0,>=0.4.6 in /usr/local/lib/python3.10/dist-packages (from pyasn1-modules>=0.2.1->google-auth>=1.2->gcsfs->axolotl==0.3.0) (0.5.0)\n", - "Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.10/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib->gcsfs->axolotl==0.3.0) (3.2.2)\n", - "Collecting svgwrite (from wavedrom->markdown2[all]->fschat==0.2.29->axolotl==0.3.0)\n", - " Downloading svgwrite-1.4.3-py3-none-any.whl (67 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m67.1/67.1 kB\u001b[0m \u001b[31m10.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hBuilding wheels for collected packages: optimum, rouge-score, flash-attn, deepspeed, fire, ffmpy, wavedrom, lit\n", - " Building wheel for optimum (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n", - " Created wheel for optimum: filename=optimum-1.13.2-py3-none-any.whl size=395598 sha256=c50241754999443c85c875c140c77aa5bf3a40f56a5ffd196d1f3c5199609431\n", - " Stored in directory: /root/.cache/pip/wheels/6e/b7/2c/79405d98f0943373d8546daeae25a3d377f7659ca0cbe48699\n", - " Building wheel for rouge-score (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", - " Created wheel for rouge-score: filename=rouge_score-0.1.2-py3-none-any.whl size=24933 sha256=cc9084a7eefdb136724effd732f6cb7c0fd01f0d7c9e7852043f1cd1d23b45a9\n", - " Stored in directory: /root/.cache/pip/wheels/5f/dd/89/461065a73be61a532ff8599a28e9beef17985c9e9c31e541b4\n", - " Building wheel for flash-attn (setup.py) ... \u001b[?25l\u001b[?25hdone\n", - " Created wheel for flash-attn: filename=flash_attn-2.3.3-cp310-cp310-linux_x86_64.whl size=57075008 sha256=bcb63b64213ab61590b340b77de84e448a442e19c100480895194df39ad7673d\n", - " Stored in directory: /root/.cache/pip/wheels/e5/e6/fa/941802ec61d1afd320d27160ab1db98e6dba65381f84b76d4a\n", - " Building wheel for deepspeed (setup.py) ... \u001b[?25l\u001b[?25hdone\n", - " Created wheel for deepspeed: filename=deepspeed-0.12.3-py3-none-any.whl size=1279165 sha256=e678c7b56fa15c9218f88c9a8a48728bee271215c554abcb4049f609e47adb25\n", - " Stored in directory: /root/.cache/pip/wheels/ee/2b/c5/892ceee06964ce8aa2a98d4260848d0d9a3f1e743862e4b45a\n", - " Building wheel for fire (setup.py) ... \u001b[?25l\u001b[?25hdone\n", - " Created wheel for fire: filename=fire-0.5.0-py2.py3-none-any.whl size=116933 sha256=2e829d74a791417c18e5d80016d0125dc65481ae4577bfd4041e51aa64558256\n", - " Stored in directory: /root/.cache/pip/wheels/90/d4/f7/9404e5db0116bd4d43e5666eaa3e70ab53723e1e3ea40c9a95\n", - " Building wheel for ffmpy (setup.py) ... \u001b[?25l\u001b[?25hdone\n", - " Created wheel for ffmpy: filename=ffmpy-0.3.1-py3-none-any.whl size=5579 sha256=dd713b1fe6d28ec101c96dbf7033241901635fcddd42a07d9e97fafada70eca4\n", - " Stored in directory: /root/.cache/pip/wheels/01/a6/d1/1c0828c304a4283b2c1639a09ad86f83d7c487ef34c6b4a1bf\n", - " Building wheel for wavedrom (setup.py) ... \u001b[?25l\u001b[?25hdone\n", - " Created wheel for wavedrom: filename=wavedrom-2.0.3.post3-py2.py3-none-any.whl size=30053 sha256=65b629500b343fc851f1c23dd2065fa414153974ae25603bba9f99e559ecbf8c\n", - " Stored in directory: /root/.cache/pip/wheels/9c/52/8c/38b454b42f712f325e26f633287484c7dc1ad469e1580c5954\n", - " Building wheel for lit (pyproject.toml) ... 
\u001b[?25l\u001b[?25hdone\n", - " Created wheel for lit: filename=lit-17.0.5-py3-none-any.whl size=93256 sha256=209fa0a842c16d9479d3626694714ebd0b9f4afaaacf487e3fb92d19ecfa9fcf\n", - " Stored in directory: /root/.cache/pip/wheels/1c/87/8e/5a42c0d4be23362b68bbff33b17f3c35a3df44f1cd2f5a24b4\n", - "Successfully built optimum rouge-score flash-attn deepspeed fire ffmpy wavedrom lit\n", - "Installing collected packages: sentencepiece, pydub, ninja, nh3, lit, hjson, ffmpy, bitsandbytes, addict, websockets, urllib3, typing-extensions, svgwrite, smmap, shortuuid, setproctitle, semantic-version, rouge, python-multipart, pynvml, pyarrow-hotfix, orjson, nvidia-nvtx-cu11, nvidia-nccl-cu11, nvidia-cusparse-cu11, nvidia-curand-cu11, nvidia-cufft-cu11, nvidia-cuda-runtime-cu11, nvidia-cuda-nvrtc-cu11, nvidia-cuda-cupti-cu11, nvidia-cublas-cu11, numpy, markdown2, jmespath, humanfriendly, hf_transfer, h11, fire, einops, docker-pycreds, dill, colorama, art, aioitertools, aiofiles, wavedrom, uvicorn, starlette, sentry-sdk, rouge-score, nvidia-cusolver-cu11, nvidia-cudnn-cu11, multiprocess, httpcore, gitdb, gekko, coloredlogs, botocore, tiktoken, responses, huggingface-hub, httpx, GitPython, fastapi, aiobotocore, wandb, tokenizers, s3fs, gradio-client, fschat, datasets, transformers, gradio, evaluate, triton, torch, accelerate, peft, xformers, optimum, bert-score, auto-gptq, flash-attn, deepspeed, axolotl\n", - " Attempting uninstall: urllib3\n", - " Found existing installation: urllib3 2.0.7\n", - " Uninstalling urllib3-2.0.7:\n", - " Successfully uninstalled urllib3-2.0.7\n", - " Attempting uninstall: typing-extensions\n", - " Found existing installation: typing_extensions 4.5.0\n", - " Uninstalling typing_extensions-4.5.0:\n", - " Successfully uninstalled typing_extensions-4.5.0\n", - " Attempting uninstall: numpy\n", - " Found existing installation: numpy 1.23.5\n", - " Uninstalling numpy-1.23.5:\n", - " Successfully uninstalled numpy-1.23.5\n", - " Attempting uninstall: huggingface-hub\n", - " Found existing installation: huggingface-hub 0.19.3\n", - " Uninstalling huggingface-hub-0.19.3:\n", - " Successfully uninstalled huggingface-hub-0.19.3\n", - " Attempting uninstall: tokenizers\n", - " Found existing installation: tokenizers 0.15.0\n", - " Uninstalling tokenizers-0.15.0:\n", - " Successfully uninstalled tokenizers-0.15.0\n", - " Attempting uninstall: transformers\n", - " Found existing installation: transformers 4.35.2\n", - " Uninstalling transformers-4.35.2:\n", - " Successfully uninstalled transformers-4.35.2\n", - " Attempting uninstall: triton\n", - " Found existing installation: triton 2.1.0\n", - " Uninstalling triton-2.1.0:\n", - " Successfully uninstalled triton-2.1.0\n", - " Attempting uninstall: torch\n", - " Found existing installation: torch 2.1.0+cu118\n", - " Uninstalling torch-2.1.0+cu118:\n", - " Successfully uninstalled torch-2.1.0+cu118\n", - " Running setup.py develop for axolotl\n", - "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
This behaviour is the source of the following dependency conflicts.\n", - "lida 0.0.10 requires kaleido, which is not installed.\n", - "llmx 0.0.15a0 requires cohere, which is not installed.\n", - "llmx 0.0.15a0 requires openai, which is not installed.\n", - "cupy-cuda11x 11.0.0 requires numpy<1.26,>=1.20, but you have numpy 1.26.2 which is incompatible.\n", - "tensorflow-probability 0.22.0 requires typing-extensions<4.6.0, but you have typing-extensions 4.8.0 which is incompatible.\n", - "torchaudio 2.1.0+cu118 requires torch==2.1.0, but you have torch 2.0.1 which is incompatible.\n", - "torchdata 0.7.0 requires torch==2.1.0, but you have torch 2.0.1 which is incompatible.\n", - "torchtext 0.16.0 requires torch==2.1.0, but you have torch 2.0.1 which is incompatible.\n", - "torchvision 0.16.0+cu118 requires torch==2.1.0, but you have torch 2.0.1 which is incompatible.\u001b[0m\u001b[31m\n", - "\u001b[0mSuccessfully installed GitPython-3.1.40 accelerate-0.24.1 addict-2.4.0 aiobotocore-2.5.4 aiofiles-23.2.1 aioitertools-0.11.0 art-6.1 auto-gptq-0.5.1 axolotl-0.3.0 bert-score-0.3.13 bitsandbytes-0.41.2.post2 botocore-1.31.17 colorama-0.4.6 coloredlogs-15.0.1 datasets-2.14.7 deepspeed-0.12.3 dill-0.3.7 docker-pycreds-0.4.0 einops-0.7.0 evaluate-0.4.0 fastapi-0.104.1 ffmpy-0.3.1 fire-0.5.0 flash-attn-2.3.3 fschat-0.2.29 gekko-1.0.6 gitdb-4.0.11 gradio-3.50.2 gradio-client-0.6.1 h11-0.14.0 hf_transfer-0.1.4 hjson-3.1.0 httpcore-1.0.2 httpx-0.25.1 huggingface-hub-0.17.3 humanfriendly-10.0 jmespath-1.0.1 lit-17.0.5 markdown2-2.4.10 multiprocess-0.70.15 nh3-0.2.14 ninja-1.11.1.1 numpy-1.26.2 nvidia-cublas-cu11-11.10.3.66 nvidia-cuda-cupti-cu11-11.7.101 nvidia-cuda-nvrtc-cu11-11.7.99 nvidia-cuda-runtime-cu11-11.7.99 nvidia-cudnn-cu11-8.5.0.96 nvidia-cufft-cu11-10.9.0.58 nvidia-curand-cu11-10.2.10.91 nvidia-cusolver-cu11-11.4.0.1 nvidia-cusparse-cu11-11.7.4.91 nvidia-nccl-cu11-2.14.3 nvidia-nvtx-cu11-11.7.91 optimum-1.13.2 orjson-3.9.10 peft-0.6.0 pyarrow-hotfix-0.5 pydub-0.25.1 pynvml-11.5.0 python-multipart-0.0.6 responses-0.18.0 rouge-1.0.1 rouge-score-0.1.2 s3fs-2023.6.0 semantic-version-2.10.0 sentencepiece-0.1.99 sentry-sdk-1.35.0 setproctitle-1.3.3 shortuuid-1.0.11 smmap-5.0.1 starlette-0.27.0 svgwrite-1.4.3 tiktoken-0.5.1 tokenizers-0.14.1 torch-2.0.1 transformers-4.35.1 triton-2.0.0 typing-extensions-4.8.0 urllib3-1.26.18 uvicorn-0.24.0.post1 wandb-0.16.0 wavedrom-2.0.3.post3 websockets-11.0.3 xformers-0.0.22\n" - ] - } - ], - "source": [ - "# Install axolotl\n", - "!git clone https://github.com/OpenAccess-AI-Collective/axolotl && cd axolotl && git checkout 797f3dd1de8fd8c0eafbd1c9fdb172abd9ff840a #0.3.0\n", - "!cd axolotl\n", - "!pip install packaging\n", - "!cd axolotl && pip install -e '.[flash-attn,deepspeed]'" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "ET82VllsW6gU", - "outputId": "27e4d16a-da64-46ed-b927-ce12b3f9af6d" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "accelerate configuration saved at /root/.cache/huggingface/accelerate/default_config.yaml\n" - ] - } - ], - "source": [ - "!accelerate config default" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "n_xquNdQYsMX", - "outputId": "68de83a9-2e5a-49e6-ff06-5013eb085370" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Requirement already satisfied: accelerate in 
/usr/local/lib/python3.10/dist-packages (0.24.1)\n", - "Requirement already satisfied: bitsandbytes in /usr/local/lib/python3.10/dist-packages (0.41.2.post2)\n", - "Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from accelerate) (1.26.2)\n", - "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from accelerate) (23.2)\n", - "Requirement already satisfied: psutil in /usr/local/lib/python3.10/dist-packages (from accelerate) (5.9.5)\n", - "Requirement already satisfied: pyyaml in /usr/local/lib/python3.10/dist-packages (from accelerate) (6.0.1)\n", - "Requirement already satisfied: torch>=1.10.0 in /usr/local/lib/python3.10/dist-packages (from accelerate) (2.0.1)\n", - "Requirement already satisfied: huggingface-hub in /usr/local/lib/python3.10/dist-packages (from accelerate) (0.17.3)\n", - "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (3.13.1)\n", - "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (4.8.0)\n", - "Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (1.12)\n", - "Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (3.2.1)\n", - "Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (3.1.2)\n", - "Requirement already satisfied: nvidia-cuda-nvrtc-cu11==11.7.99 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (11.7.99)\n", - "Requirement already satisfied: nvidia-cuda-runtime-cu11==11.7.99 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (11.7.99)\n", - "Requirement already satisfied: nvidia-cuda-cupti-cu11==11.7.101 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (11.7.101)\n", - "Requirement already satisfied: nvidia-cudnn-cu11==8.5.0.96 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (8.5.0.96)\n", - "Requirement already satisfied: nvidia-cublas-cu11==11.10.3.66 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (11.10.3.66)\n", - "Requirement already satisfied: nvidia-cufft-cu11==10.9.0.58 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (10.9.0.58)\n", - "Requirement already satisfied: nvidia-curand-cu11==10.2.10.91 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (10.2.10.91)\n", - "Requirement already satisfied: nvidia-cusolver-cu11==11.4.0.1 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (11.4.0.1)\n", - "Requirement already satisfied: nvidia-cusparse-cu11==11.7.4.91 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (11.7.4.91)\n", - "Requirement already satisfied: nvidia-nccl-cu11==2.14.3 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (2.14.3)\n", - "Requirement already satisfied: nvidia-nvtx-cu11==11.7.91 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (11.7.91)\n", - "Requirement already satisfied: triton==2.0.0 in /usr/local/lib/python3.10/dist-packages (from torch>=1.10.0->accelerate) (2.0.0)\n", - "Requirement already satisfied: setuptools in /usr/local/lib/python3.10/dist-packages (from nvidia-cublas-cu11==11.10.3.66->torch>=1.10.0->accelerate) (67.7.2)\n", - 
"Requirement already satisfied: wheel in /usr/local/lib/python3.10/dist-packages (from nvidia-cublas-cu11==11.10.3.66->torch>=1.10.0->accelerate) (0.41.3)\n", - "Requirement already satisfied: cmake in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->torch>=1.10.0->accelerate) (3.27.7)\n", - "Requirement already satisfied: lit in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->torch>=1.10.0->accelerate) (17.0.5)\n", - "Requirement already satisfied: fsspec in /usr/local/lib/python3.10/dist-packages (from huggingface-hub->accelerate) (2023.6.0)\n", - "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from huggingface-hub->accelerate) (2.31.0)\n", - "Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub->accelerate) (4.66.1)\n", - "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch>=1.10.0->accelerate) (2.1.3)\n", - "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests->huggingface-hub->accelerate) (3.3.2)\n", - "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->huggingface-hub->accelerate) (3.4)\n", - "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->huggingface-hub->accelerate) (1.26.18)\n", - "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->huggingface-hub->accelerate) (2023.7.22)\n", - "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch>=1.10.0->accelerate) (1.3.0)\n", - "/content\n" - ] - } - ], - "source": [ - "!pip install accelerate bitsandbytes\n", - "!pwd" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "IZv_VnRrtTSz", - "outputId": "36439ab2-c4de-46b5-dd36-b90d09db8358" - }, - "outputs": [ - { - "data": { - "text/plain": [ - "True" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import torch\n", - "torch.cuda.is_available()" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "9SwNYGmisJU6", - "outputId": "963ffc20-dd38-48e4-f72d-8a4d78e36461" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Requirement already satisfied: transformers in /usr/local/lib/python3.10/dist-packages (4.35.1)\n", - "Collecting transformers\n", - " Downloading transformers-4.35.2-py3-none-any.whl (7.9 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m7.9/7.9 MB\u001b[0m \u001b[31m24.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from transformers) (3.13.1)\n", - "Requirement already satisfied: huggingface-hub<1.0,>=0.16.4 in /usr/local/lib/python3.10/dist-packages (from transformers) (0.17.3)\n", - "Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from transformers) (1.26.2)\n", - "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from transformers) (23.2)\n", - "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from 
transformers) (6.0.1)\n", - "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.10/dist-packages (from transformers) (2023.6.3)\n", - "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from transformers) (2.31.0)\n", - "Requirement already satisfied: tokenizers<0.19,>=0.14 in /usr/local/lib/python3.10/dist-packages (from transformers) (0.14.1)\n", - "Requirement already satisfied: safetensors>=0.3.1 in /usr/local/lib/python3.10/dist-packages (from transformers) (0.4.0)\n", - "Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.10/dist-packages (from transformers) (4.66.1)\n", - "Requirement already satisfied: fsspec in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.16.4->transformers) (2023.6.0)\n", - "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.16.4->transformers) (4.8.0)\n", - "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests->transformers) (3.3.2)\n", - "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->transformers) (3.4)\n", - "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->transformers) (1.26.18)\n", - "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->transformers) (2023.7.22)\n", - "Installing collected packages: transformers\n", - " Attempting uninstall: transformers\n", - " Found existing installation: transformers 4.35.1\n", - " Uninstalling transformers-4.35.1:\n", - " Successfully uninstalled transformers-4.35.1\n", - "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
This behaviour is the source of the following dependency conflicts.\n", - "axolotl 0.3.0 requires transformers==4.35.1, but you have transformers 4.35.2 which is incompatible.\u001b[0m\u001b[31m\n", - "\u001b[0mSuccessfully installed transformers-4.35.2\n" - ] - } - ], - "source": [ - "!pip install --upgrade transformers" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "jCsnQhmave0Z", - "outputId": "6c9f39ef-0e4a-41cb-ee22-8c71438e254e" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Collecting flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE\n", - " Downloading https://github.com/Dao-AILab/flash-attention/releases/download/v2.3.0/flash_attn-2.3.0+cu117torch2.0cxx11abiFALSE-cp310-cp310-linux_x86_64.whl (30.0 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m30.0/30.0 MB\u001b[0m \u001b[31m46.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: torch in /usr/local/lib/python3.10/dist-packages (from flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (2.0.1)\n", - "Requirement already satisfied: einops in /usr/local/lib/python3.10/dist-packages (from flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (0.7.0)\n", - "Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (23.2)\n", - "Requirement already satisfied: ninja in /usr/local/lib/python3.10/dist-packages (from flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (1.11.1.1)\n", - "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch->flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (3.13.1)\n", - "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from torch->flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (4.8.0)\n", - "Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch->flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (1.12)\n", - "Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch->flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (3.2.1)\n", - "Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch->flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (3.1.2)\n", - "Requirement already satisfied: nvidia-cuda-nvrtc-cu11==11.7.99 in /usr/local/lib/python3.10/dist-packages (from torch->flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (11.7.99)\n", - "Requirement already satisfied: nvidia-cuda-runtime-cu11==11.7.99 in /usr/local/lib/python3.10/dist-packages (from torch->flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (11.7.99)\n", - "Requirement already satisfied: nvidia-cuda-cupti-cu11==11.7.101 in /usr/local/lib/python3.10/dist-packages (from torch->flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (11.7.101)\n", - "Requirement already satisfied: nvidia-cudnn-cu11==8.5.0.96 in /usr/local/lib/python3.10/dist-packages (from torch->flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (8.5.0.96)\n", - "Requirement already satisfied: nvidia-cublas-cu11==11.10.3.66 in /usr/local/lib/python3.10/dist-packages (from torch->flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (11.10.3.66)\n", - "Requirement already satisfied: nvidia-cufft-cu11==10.9.0.58 in /usr/local/lib/python3.10/dist-packages (from torch->flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (10.9.0.58)\n", - "Requirement already satisfied: 
nvidia-curand-cu11==10.2.10.91 in /usr/local/lib/python3.10/dist-packages (from torch->flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (10.2.10.91)\n", - "Requirement already satisfied: nvidia-cusolver-cu11==11.4.0.1 in /usr/local/lib/python3.10/dist-packages (from torch->flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (11.4.0.1)\n", - "Requirement already satisfied: nvidia-cusparse-cu11==11.7.4.91 in /usr/local/lib/python3.10/dist-packages (from torch->flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (11.7.4.91)\n", - "Requirement already satisfied: nvidia-nccl-cu11==2.14.3 in /usr/local/lib/python3.10/dist-packages (from torch->flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (2.14.3)\n", - "Requirement already satisfied: nvidia-nvtx-cu11==11.7.91 in /usr/local/lib/python3.10/dist-packages (from torch->flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (11.7.91)\n", - "Requirement already satisfied: triton==2.0.0 in /usr/local/lib/python3.10/dist-packages (from torch->flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (2.0.0)\n", - "Requirement already satisfied: setuptools in /usr/local/lib/python3.10/dist-packages (from nvidia-cublas-cu11==11.10.3.66->torch->flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (67.7.2)\n", - "Requirement already satisfied: wheel in /usr/local/lib/python3.10/dist-packages (from nvidia-cublas-cu11==11.10.3.66->torch->flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (0.41.3)\n", - "Requirement already satisfied: cmake in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->torch->flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (3.27.7)\n", - "Requirement already satisfied: lit in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->torch->flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (17.0.5)\n", - "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch->flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (2.1.3)\n", - "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch->flash-attn==2.3.0+cu117torch2.0cxx11abiFALSE) (1.3.0)\n", - "Installing collected packages: flash-attn\n", - " Attempting uninstall: flash-attn\n", - " Found existing installation: flash-attn 2.3.3\n", - " Uninstalling flash-attn-2.3.3:\n", - " Successfully uninstalled flash-attn-2.3.3\n", - "Successfully installed flash-attn-2.3.0\n" - ] - } - ], - "source": [ - "# https://github.com/oobabooga/text-generation-webui/issues/4238\n", - "!pip install https://github.com/Dao-AILab/flash-attention/releases/download/v2.3.0/flash_attn-2.3.0+cu117torch2.0cxx11abiFALSE-cp310-cp310-linux_x86_64.whl" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "3xU248nEtTxg" - }, - "source": [ - "Start the training process (fine-tuning)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "19jClLyTbumJ", - "outputId": "9b74bad8-956d-4434-953a-d5a5be229043" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2023-11-18 10:15:30.581758: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", - "2023-11-18 10:15:30.581829: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", - "2023-11-18 10:15:30.581870: E 
tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", - "2023-11-18 10:15:32.302565: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n", - "/usr/local/lib/python3.10/dist-packages/transformers/deepspeed.py:23: FutureWarning: transformers.deepspeed module is deprecated and will be removed in a future version. Please import deepspeed modules directly from transformers.integrations\n", - " warnings.warn(\n", - " dP dP dP \n", - " 88 88 88 \n", - " .d8888b. dP. .dP .d8888b. 88 .d8888b. d8888P 88 \n", - " 88' `88 `8bd8' 88' `88 88 88' `88 88 88 \n", - " 88. .88 .d88b. 88. .88 88 88. .88 88 88 \n", - " `88888P8 dP' `dP `88888P' dP `88888P' dP dP \n", - " \n", - " \n", - "\n", - "\u001b[33m[2023-11-18 10:15:36,028] [WARNING] [axolotl.validate_config:169] [PID:4655] [RANK:0] eval_batch_size != micro_batch_size. This can lead to VRAM instability.\u001b[39m\n", - "[2023-11-18 10:15:36,239] [INFO] [axolotl.normalize_config:128] [PID:4655] [RANK:0] GPU memory usage baseline: 0.000GB (+0.255GB misc)\u001b[39m\n", - "\u001b[33m[2023-11-18 10:15:36,239] [WARNING] [axolotl.scripts.check_accelerate_default_config:343] [PID:4655] [RANK:0] accelerate config file found at /root/.cache/huggingface/accelerate/default_config.yaml. This can lead to unexpected errors\u001b[39m\n", - "\u001b[33m[2023-11-18 10:15:36,239] [WARNING] [axolotl.scripts.check_user_token:355] [PID:4655] [RANK:0] Error verifying HuggingFace token. Remember to log in using `huggingface-cli login` and get your access token from https://huggingface.co/settings/tokens if you want to use gated models or datasets.\u001b[39m\n", - "[2023-11-18 10:15:36,594] [DEBUG] [axolotl.load_tokenizer:100] [PID:4655] [RANK:0] EOS: 2 / \u001b[39m\n", - "[2023-11-18 10:15:36,595] [DEBUG] [axolotl.load_tokenizer:101] [PID:4655] [RANK:0] BOS: 1 / \u001b[39m\n", - "[2023-11-18 10:15:36,595] [DEBUG] [axolotl.load_tokenizer:102] [PID:4655] [RANK:0] PAD: 0 / \u001b[39m\n", - "[2023-11-18 10:15:36,595] [DEBUG] [axolotl.load_tokenizer:103] [PID:4655] [RANK:0] UNK: 0 / \u001b[39m\n", - "[2023-11-18 10:15:36,595] [INFO] [axolotl.load_tokenized_prepared_datasets:147] [PID:4655] [RANK:0] Unable to find prepared dataset in last_run_prepared/5dca4483042d16053f3cd9eeaf5ac8af\u001b[39m\n", - "[2023-11-18 10:15:36,595] [INFO] [axolotl.load_tokenized_prepared_datasets:148] [PID:4655] [RANK:0] Loading raw datasets...\u001b[39m\n", - "[2023-11-18 10:15:36,595] [INFO] [axolotl.load_tokenized_prepared_datasets:153] [PID:4655] [RANK:0] No seed provided, using default seed of 42\u001b[39m\n", - "Map (num_proc=2): 100% 846/846 [00:00<00:00, 991.41 examples/s] \n", - "[2023-11-18 10:15:37,890] [INFO] [axolotl.load_tokenized_prepared_datasets:355] [PID:4655] [RANK:0] merging datasets\u001b[39m\n", - "[2023-11-18 10:15:37,892] [INFO] [axolotl.load_tokenized_prepared_datasets:362] [PID:4655] [RANK:0] Saving merged prepared dataset to disk... 
last_run_prepared/5dca4483042d16053f3cd9eeaf5ac8af\u001b[39m\n", - "Saving the dataset (1/1 shards): 100% 846/846 [00:00<00:00, 118881.71 examples/s]\n", - "Filter (num_proc=2): 100% 803/803 [00:00<00:00, 3171.26 examples/s]\n", - "Filter (num_proc=2): 100% 43/43 [00:00<00:00, 301.68 examples/s]\n", - "Map (num_proc=2): 100% 803/803 [00:00<00:00, 2783.35 examples/s]\n", - "[2023-11-18 10:15:38,745] [DEBUG] [axolotl.log:60] [PID:4655] [RANK:0] total_num_tokens: 77893\u001b[39m\n", - "[2023-11-18 10:15:38,753] [DEBUG] [axolotl.log:60] [PID:4655] [RANK:0] `total_supervised_tokens: 77893`\u001b[39m\n", - "[2023-11-18 10:15:44,265] [INFO] [axolotl.utils.samplers.multipack._len_est:178] [PID:4655] [RANK:0] packing_efficiency_estimate: 1.0 total_num_tokens per device: 77893\u001b[39m\n", - "[2023-11-18 10:15:44,265] [DEBUG] [axolotl.log:60] [PID:4655] [RANK:0] data_loader_len: 8\u001b[39m\n", - "[2023-11-18 10:15:44,265] [INFO] [axolotl.log:60] [PID:4655] [RANK:0] sample_packing_eff_est across ranks: [0.95084228515625]\u001b[39m\n", - "[2023-11-18 10:15:44,265] [DEBUG] [axolotl.log:60] [PID:4655] [RANK:0] sample_packing_eff_est: 0.96\u001b[39m\n", - "[2023-11-18 10:15:44,266] [DEBUG] [axolotl.log:60] [PID:4655] [RANK:0] total_num_steps: 32\u001b[39m\n", - "[2023-11-18 10:15:44,266] [DEBUG] [axolotl.train.log:60] [PID:4655] [RANK:0] loading tokenizer... NousResearch/Llama-2-7b-hf\u001b[39m\n", - "[2023-11-18 10:15:44,629] [DEBUG] [axolotl.load_tokenizer:100] [PID:4655] [RANK:0] EOS: 2 / \u001b[39m\n", - "[2023-11-18 10:15:44,629] [DEBUG] [axolotl.load_tokenizer:101] [PID:4655] [RANK:0] BOS: 1 / \u001b[39m\n", - "[2023-11-18 10:15:44,629] [DEBUG] [axolotl.load_tokenizer:102] [PID:4655] [RANK:0] PAD: 0 / \u001b[39m\n", - "[2023-11-18 10:15:44,629] [DEBUG] [axolotl.load_tokenizer:103] [PID:4655] [RANK:0] UNK: 0 / \u001b[39m\n", - "[2023-11-18 10:15:44,630] [DEBUG] [axolotl.train.log:60] [PID:4655] [RANK:0] loading model and peft_config...\u001b[39m\n", - "[2023-11-18 10:15:44,713] [INFO] [axolotl.load_model:201] [PID:4655] [RANK:0] patching _expand_mask\u001b[39m\n", - "Downloading (…)fetensors.index.json: 100% 26.8k/26.8k [00:00<00:00, 34.9MB/s]\n", - "Downloading shards: 0% 0/2 [00:00> "$GITHUB_OUTPUT" - # Send the summary somewhere (e.g. 
Discord) - - name: Discord notification - env: - DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK_URL }} - DISCORD_USERNAME: "discord-bot" - DISCORD_AVATAR: "" - uses: Ilshidur/action-discord@master - with: - args: ${{ steps.summarize.outputs.message }} \ No newline at end of file diff --git a/examples/insomnia/Insomnia_LocalAI.json b/examples/insomnia/Insomnia_LocalAI.json deleted file mode 100644 index fed32f85..00000000 --- a/examples/insomnia/Insomnia_LocalAI.json +++ /dev/null @@ -1 +0,0 @@ -{"_type":"export","__export_format":4,"__export_date":"2023-09-01T05:11:43.695Z","__export_source":"insomnia.desktop.app:v2023.5.7","resources":[{"_id":"req_527fdc87fd404a2a8f1c401fb7a0e642","parentId":"fld_911f4d2a05d84b59aff9d4924d1d3877","modified":1692719560635,"created":1692719560635,"url":"{{HOST}}:{{PORT}}/models","name":"get models list","description":"","method":"GET","body":{},"parameters":[],"headers":[],"authentication":{},"metaSortKey":-1692719560635,"isPrivate":false,"settingStoreCookies":true,"settingSendCookies":true,"settingDisableRenderRequestBody":false,"settingEncodeUrl":true,"settingRebuildPath":true,"settingFollowRedirects":"global","_type":"request"},{"_id":"fld_911f4d2a05d84b59aff9d4924d1d3877","parentId":"wrk_76923a29272642e49208d65ffe7e885a","modified":1692719560581,"created":1692719560581,"name":"LocalAI","description":"","environment":{},"environmentPropertyOrder":null,"metaSortKey":-1692719560581,"_type":"request_group"},{"_id":"wrk_76923a29272642e49208d65ffe7e885a","parentId":null,"modified":1692719728510,"created":1692719560576,"name":"LocalAI","description":"","scope":"collection","_type":"workspace"},{"_id":"req_03c6b65bce1541fa9a7751c7ed8a7f40","parentId":"fld_9d70a564f6334ff6a5e9473d124d8ee6","modified":1693542905270,"created":1692719560630,"url":"{{HOST}}:{{PORT}}/models/available","name":"list MODELS in galleries","description":"","method":"GET","body":{"mimeType":"","text":"{\n}"},"parameters":[],"headers":[],"authentication":{},"metaSortKey":-1692719560630,"isPrivate":false,"settingStoreCookies":true,"settingSendCookies":true,"settingDisableRenderRequestBody":false,"settingEncodeUrl":true,"settingRebuildPath":true,"settingFollowRedirects":"global","_type":"request"},{"_id":"fld_9d70a564f6334ff6a5e9473d124d8ee6","parentId":"fld_911f4d2a05d84b59aff9d4924d1d3877","modified":1692719560625,"created":1692719560625,"name":"model gallery","description":"","environment":{},"environmentPropertyOrder":null,"metaSortKey":-1692719560625,"_type":"request_group"},{"_id":"req_9cfc92cb7f5c43b6bea7992d54e94eca","parentId":"fld_9d70a564f6334ff6a5e9473d124d8ee6","modified":1693542894779,"created":1693526412262,"url":"{{HOST}}:{{PORT}}/models/galleries","name":"list model GALLERIES","description":"","method":"GET","body":{"mimeType":"","text":"{\n}"},"parameters":[],"headers":[],"authentication":{},"metaSortKey":-1692719560628.5,"isPrivate":false,"settingStoreCookies":true,"settingSendCookies":true,"settingDisableRenderRequestBody":false,"settingEncodeUrl":true,"settingRebuildPath":true,"settingFollowRedirects":"global","_type":"request"},{"_id":"req_2cec05b38d9049c5bee24dcac03d1214","parentId":"fld_9d70a564f6334ff6a5e9473d124d8ee6","modified":1692773355999,"created":1692719560627,"url":"{{HOST}}:{{PORT}}/models/apply","name":"model gallery apply","description":"","method":"POST","body":{"mimeType":"application/json","text":"{\n \"id\": \"huggingface@TheBloke/wizardlm-13b-v1.2-ggml/wizardlm-13b-v1.2.ggmlv3.q4_0.bin\",\n \"name\": 
\"test\"\n}"},"parameters":[],"headers":[{"name":"Content-Type","value":"application/json"}],"authentication":{},"metaSortKey":-1692719560627,"isPrivate":false,"settingStoreCookies":true,"settingSendCookies":true,"settingDisableRenderRequestBody":false,"settingEncodeUrl":true,"settingRebuildPath":true,"settingFollowRedirects":"global","_type":"request"},{"_id":"req_f91d77c51f1d41d9b035ed083275441a","parentId":"fld_9d70a564f6334ff6a5e9473d124d8ee6","modified":1693545040139,"created":1693527252021,"url":"{{HOST}}:{{PORT}}/models/galleries","name":"add model gallery","description":"","method":"POST","body":{"mimeType":"application/json","text":"{\n \"url\": \"file:///home/dave/projects/model-gallery/huggingface/TheBloke__CodeLlama-7B-Instruct-GGML.yaml\",\n \"name\": \"test\"\n}"},"parameters":[],"headers":[{"name":"Content-Type","value":"application/json"}],"authentication":{},"metaSortKey":-1692719560625.5,"isPrivate":false,"settingStoreCookies":true,"settingSendCookies":true,"settingDisableRenderRequestBody":false,"settingEncodeUrl":true,"settingRebuildPath":true,"settingFollowRedirects":"global","_type":"request"},{"_id":"req_626fee90a5a04947a8545062e9a52350","parentId":"fld_9d70a564f6334ff6a5e9473d124d8ee6","modified":1693544472707,"created":1693544399269,"url":"{{HOST}}:{{PORT}}/models/galleries","name":"delete model gallery","description":"","method":"DELETE","body":{"mimeType":"application/json","text":"{\n \"name\": \"test\"\n}"},"parameters":[],"headers":[{"name":"Content-Type","value":"application/json"}],"authentication":{},"metaSortKey":-1692719560624.75,"isPrivate":false,"settingStoreCookies":true,"settingSendCookies":true,"settingDisableRenderRequestBody":false,"settingEncodeUrl":true,"settingRebuildPath":true,"settingFollowRedirects":"global","_type":"request"},{"_id":"req_e1e150aa05c54bb49f5bceca33d4d917","parentId":"fld_9d70a564f6334ff6a5e9473d124d8ee6","modified":1693537478093,"created":1692826085669,"url":"{{HOST}}:{{PORT}}/models/apply","name":"model gallery apply (gist)","description":"","method":"POST","body":{"mimeType":"application/json","text":"{\n \"id\": \"TheBloke__CodeLlama-7B-Instruct-GGML__codellama-7b-instruct.ggmlv3.Q2_K.bin\"\n}"},"parameters":[],"headers":[{"name":"Content-Type","value":"application/json"}],"authentication":{},"metaSortKey":-1692719560624,"isPrivate":false,"settingStoreCookies":true,"settingSendCookies":true,"settingDisableRenderRequestBody":false,"settingEncodeUrl":true,"settingRebuildPath":true,"settingFollowRedirects":"global","_type":"request"},{"_id":"req_d9f9175a0e1e4a8facf19e365c89403b","parentId":"fld_4a966fb07756459d9d7c1f5a5561228f","modified":1692722441541,"created":1692722374898,"url":"{{HOST}}:{{PORT}}/tts","name":"/tts","description":"","method":"POST","body":{"mimeType":"application/json","text":"{\n \"model\": \"{{DEFAULT_MODEL}}\",\n \"input\": \"A STRANGE GAME.\\nTHE ONLY WINNING MOVE IS NOT TO PLAY.\\n\\nHOW ABOUT A NICE GAME OF 
CHESS?\"\n}"},"parameters":[],"headers":[{"name":"Content-Type","value":"application/json"}],"authentication":{},"metaSortKey":-1692719560630,"isPrivate":false,"settingStoreCookies":true,"settingSendCookies":true,"settingDisableRenderRequestBody":false,"settingEncodeUrl":true,"settingRebuildPath":true,"settingFollowRedirects":"global","_type":"request"},{"_id":"fld_4a966fb07756459d9d7c1f5a5561228f","parentId":"fld_911f4d2a05d84b59aff9d4924d1d3877","modified":1692722533229,"created":1692722439678,"name":"tts","description":"","environment":{},"environmentPropertyOrder":null,"metaSortKey":-1692719560612.5,"_type":"request_group"},{"_id":"req_1dec0983884c4b93acf3d8843108c003","parentId":"fld_2bc22ec3590240cd8d465fe084f5e14d","modified":1692722121942,"created":1692719560608,"url":"{{HOST}}:{{PORT}}/chat/completions","name":"chat completion (simple, 1 message)","description":"","method":"POST","body":{"mimeType":"application/json","text":"{\r\n \"model\": \"{{DEFAULT_MODEL}}\",\r\n \"messages\": [{\"role\": \"user\", \"content\": \"How could one use friction to cook an egg?\"}],\r\n \"max_tokens\": 256,\r\n \"temperature\": 0.2\r\n}"},"parameters":[],"headers":[{"name":"Content-Type","value":"application/json"}],"authentication":{},"metaSortKey":-1692719560611.5,"isPrivate":false,"settingStoreCookies":true,"settingSendCookies":true,"settingDisableRenderRequestBody":false,"settingEncodeUrl":true,"settingRebuildPath":true,"settingFollowRedirects":"global","_type":"request"},{"_id":"fld_2bc22ec3590240cd8d465fe084f5e14d","parentId":"fld_9584156ef4534e86b71735ed2b1e86e5","modified":1692721647772,"created":1692721647772,"name":"chat","description":"","environment":{},"environmentPropertyOrder":null,"metaSortKey":-1692721647772,"_type":"request_group"},{"_id":"fld_9584156ef4534e86b71735ed2b1e86e5","parentId":"fld_911f4d2a05d84b59aff9d4924d1d3877","modified":1692719560600,"created":1692719560600,"name":"llm text","description":"","environment":{},"environmentPropertyOrder":null,"metaSortKey":-1692719560600,"_type":"request_group"},{"_id":"req_e2ca5ec3feae4fd7b0b4162dad261f08","parentId":"fld_2bc22ec3590240cd8d465fe084f5e14d","modified":1692722124781,"created":1692719560615,"url":"{{HOST}}:{{PORT}}/chat/completions","name":"chat/completions (long)","description":"","method":"POST","body":{"mimeType":"application/json","text":"{\r\n \"model\": \"{{DEFAULT_MODEL}}\",\r\n \"messages\": [{\"role\": \"system\", \"content\": \"You are a helpful, intelligent pirate assistant. Always answer as helpfully as possible, while being a pirate and using pirate language. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\"},\r\n {\"role\": \"user\", \"content\": \"How could one use electricity to cook an egg?\"},\r\n {\"role\": \"assistant\",\r\n \"content\": \"Shiver me timbers! Using electricity to cook an egg? Well, matey, I reckon that be a right curious idea! *adjusts spectacles* Now, I ain't sure if this be possible, but I'll do me best to help ye out.\\n\\nFirst things first, ye gotta understand that electricity be a powerful force, and it can't just cook an egg on its own. Ye see, electricity be like a mighty wind that can make things happen, but it needs somethin' to work with. 
So, if ye want to use electricity to cook an egg, ye gotta find a way to harness that power and make it do the cookin'.\\n\\nNow, I know what ye might be thinkin': \\\"How do I harness the power of electricity to cook an egg?\\\" Well, matey, there be a few ways to do it. One way be to use a special device called an \\\"electric frying pan.\\\" This be a pan that has a built-in heating element that gets hot when ye plug it into a wall socket. When the element gets hot, ye can crack an egg into the pan and watch as it cook\"\r\n },\r\n {\"role\": \"user\", \"content\": \"I don't have one of those, just a raw wire and plenty of power! How do we get it done?\"}],\r\n \"max_tokens\": 1024,\r\n \"temperature\": 0.5\r\n}"},"parameters":[],"headers":[{"name":"Content-Type","value":"application/json"}],"authentication":{},"metaSortKey":-1692719560561.5,"isPrivate":false,"settingStoreCookies":true,"settingSendCookies":true,"settingDisableRenderRequestBody":false,"settingEncodeUrl":true,"settingRebuildPath":true,"settingFollowRedirects":"global","_type":"request"},{"_id":"req_0a66d6bfaff14b439ade641ce7469f16","parentId":"fld_2bc22ec3590240cd8d465fe084f5e14d","modified":1692722128583,"created":1692719560619,"url":"{{HOST}}:{{PORT}}/chat/completions","name":"chat/completions (stream)","description":"","method":"POST","body":{"mimeType":"application/json","text":"{\n \"model\": \"{{DEFAULT_MODEL}}\",\n \"messages\": [{\"role\": \"user\", \"content\": \"Explain how I can set sail on the ocean using only power generated by seagulls?\"}],\n \"max_tokens\": 256,\n \"temperature\": 0.9,\n \"stream\": true\n}"},"parameters":[],"headers":[{"name":"Content-Type","value":"application/json"}],"authentication":{},"metaSortKey":-1692719560511.5,"isPrivate":false,"settingStoreCookies":true,"settingSendCookies":true,"settingDisableRenderRequestBody":false,"settingEncodeUrl":true,"settingRebuildPath":true,"settingFollowRedirects":"global","_type":"request"},{"_id":"req_c1f5faeb3d8a4a0eb52c98d40c32f8f9","parentId":"fld_9584156ef4534e86b71735ed2b1e86e5","modified":1692722131493,"created":1692719560621,"url":"{{HOST}}:{{PORT}}/completions","name":"/completions","description":"","method":"POST","body":{"mimeType":"application/json","text":"{\r\n \"model\": \"{{DEFAULT_MODEL}}\",\r\n \"prompt\": \"function downloadFile(string url, string outputPath) {\",\r\n \"max_tokens\": 256,\r\n \"temperature\": 0.5\r\n}"},"parameters":[],"headers":[{"name":"Content-Type","value":"application/json"}],"authentication":{},"metaSortKey":-1692719560621,"isPrivate":false,"settingStoreCookies":true,"settingSendCookies":true,"settingDisableRenderRequestBody":false,"settingEncodeUrl":true,"settingRebuildPath":true,"settingFollowRedirects":"global","_type":"request"},{"_id":"req_f7ea027749b34f17a38f2bd549885f92","parentId":"fld_9584156ef4534e86b71735ed2b1e86e5","modified":1692722148262,"created":1692721748683,"url":"{{HOST}}:{{PORT}}/edits","name":"/edits","description":"","method":"POST","body":{"mimeType":"application/json","text":"{\n \"model\": \"{{DEFAULT_MODEL}}\",\n \"input\": \"What day of the wek is it?\",\n \"instruction\": \"Fix the spelling 
mistakes\"\n}"},"parameters":[],"headers":[{"name":"Content-Type","value":"application/json"}],"authentication":{},"metaSortKey":-1692719560616.25,"isPrivate":false,"settingStoreCookies":true,"settingSendCookies":true,"settingDisableRenderRequestBody":false,"settingEncodeUrl":true,"settingRebuildPath":true,"settingFollowRedirects":"global","_type":"request"},{"_id":"req_fe1e62ed6c384eccb0a75fbaacfd8e92","parentId":"fld_9584156ef4534e86b71735ed2b1e86e5","modified":1692722327277,"created":1692722256486,"url":"{{HOST}}:{{PORT}}/embeddings","name":"/embeddings","description":"","method":"POST","body":{"mimeType":"application/json","text":"{\n \"model\": \"{{DEFAULT_MODEL}}\",\n \"input\": \"A STRANGE GAME.\\nTHE ONLY WINNING MOVE IS NOT TO PLAY.\\n\\nHOW ABOUT A NICE GAME OF CHESS?\"\n}"},"parameters":[],"headers":[{"name":"Content-Type","value":"application/json"}],"authentication":{},"metaSortKey":-1692719560613.875,"isPrivate":false,"settingStoreCookies":true,"settingSendCookies":true,"settingDisableRenderRequestBody":false,"settingEncodeUrl":true,"settingRebuildPath":true,"settingFollowRedirects":"global","_type":"request"},{"_id":"req_86b544c9e5324512ae2c830e8c2831ba","parentId":"fld_220b3e247cd940d0b615122f75b4da32","modified":1692722115897,"created":1692719560594,"url":"{{HOST}}:{{PORT}}/backend/shutdown","name":"backend/shutdown","description":"","method":"POST","body":{"mimeType":"application/json","text":"{\r\n \"model\": \"{{DEFAULT_MODEL}}\"\r\n}"},"parameters":[],"headers":[{"name":"Content-Type","value":"application/json"}],"authentication":{},"metaSortKey":-1692719560594,"isPrivate":false,"settingStoreCookies":true,"settingSendCookies":true,"settingDisableRenderRequestBody":false,"settingEncodeUrl":true,"settingRebuildPath":true,"settingFollowRedirects":"global","_type":"request"},{"_id":"fld_220b3e247cd940d0b615122f75b4da32","parentId":"fld_911f4d2a05d84b59aff9d4924d1d3877","modified":1692719560584,"created":1692719560584,"name":"backend monitor","description":"","environment":{},"environmentPropertyOrder":null,"metaSortKey":-1692719560584,"_type":"request_group"},{"_id":"req_395dfb3a370d470d907532dc94f91d3f","parentId":"fld_220b3e247cd940d0b615122f75b4da32","modified":1692719560587,"created":1692719560587,"url":"{{HOST}}:{{PORT}}/backend/monitor","name":"backend monitor","description":"","method":"GET","body":{"mimeType":"","text":"{\r\n \"model\": \"{{DEFAULT_MODEL}}\"\r\n}"},"parameters":[],"headers":[],"authentication":{},"metaSortKey":-1692719560587,"isPrivate":false,"settingStoreCookies":true,"settingSendCookies":true,"settingDisableRenderRequestBody":false,"settingEncodeUrl":true,"settingRebuildPath":true,"settingFollowRedirects":"global","_type":"request"},{"_id":"env_b7b4937221a512490c06b95c4986a60800670c2f","parentId":"wrk_76923a29272642e49208d65ffe7e885a","modified":1692720330621,"created":1692719566107,"name":"Base Environment","data":{"PORT":8080,"DEFAULT_MODEL":"gpt-3.5-turbo"},"dataPropertyOrder":{"&":["PORT","DEFAULT_MODEL"]},"color":null,"isPrivate":false,"metaSortKey":1692719566107,"_type":"environment"},{"_id":"jar_b7b4937221a512490c06b95c4986a60800670c2f","parentId":"wrk_76923a29272642e49208d65ffe7e885a","modified":1692719566120,"created":1692719566120,"name":"Default 
Jar","cookies":[],"_type":"cookie_jar"},{"_id":"env_4a68ee3db2714cc69d81419acd6b2c31","parentId":"env_b7b4937221a512490c06b95c4986a60800670c2f","modified":1692719629896,"created":1692719607346,"name":"localhost","data":{"HOST":"localhost"},"dataPropertyOrder":{"&":["HOST"]},"color":null,"isPrivate":false,"metaSortKey":1692719607346,"_type":"environment"}]} \ No newline at end of file diff --git a/examples/insomnia/README.md b/examples/insomnia/README.md deleted file mode 100644 index 64994b76..00000000 --- a/examples/insomnia/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# Insomnia - -Developer Testing Request Collection for [Insomnia](https://insomnia.rest/), an open-source REST client - -## Instructions - -* Install Insomnia as normal -* [Import](https://docs.insomnia.rest/insomnia/import-export-data) `Insomnia_LocalAI.json` -* Control + E opens the environment settings - - -| **Parameter Name** | **Default Value** | **Description** | -|--------------------|-------------------|------------------------------------------| -| HOST | localhost | LocalAI base URL | -| PORT | 8080 | LocalAI port | -| DEFAULT_MODEL | gpt-3.5-turbo | Name of the model used on most requests. | - -** you may want to duplicate localhost into a "Private" environment to avoid saving private settings back to this file ** \ No newline at end of file diff --git a/examples/k8sgpt/README.md b/examples/k8sgpt/README.md deleted file mode 100644 index 23aa1f8b..00000000 --- a/examples/k8sgpt/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# k8sgpt example - -This example show how to use LocalAI with k8sgpt - -![Screenshot from 2023-06-19 23-58-47](https://github.com/go-skynet/go-ggml-transformers.cpp/assets/2420543/cab87409-ee68-44ae-8d53-41627fb49509) - -## Create the cluster locally with Kind (optional) - -If you want to test this locally without a remote Kubernetes cluster, you can use kind. - -Install [kind](https://kind.sigs.k8s.io/) and create a cluster: - -``` -kind create cluster -``` - -## Setup LocalAI - -We will use [helm](https://helm.sh/docs/intro/install/): - -``` -helm repo add go-skynet https://go-skynet.github.io/helm-charts/ -helm repo update - -# Clone LocalAI -git clone https://github.com/go-skynet/LocalAI - -cd LocalAI/examples/k8sgpt - -# modify values.yaml preload_models with the models you want to install. -# CHANGE the URL to a model in huggingface. 
-helm install local-ai go-skynet/local-ai --create-namespace --namespace local-ai --values values.yaml -``` - -## Setup K8sGPT - -``` -# Install k8sgpt -helm repo add k8sgpt https://charts.k8sgpt.ai/ -helm repo update -helm install release k8sgpt/k8sgpt-operator -n k8sgpt-operator-system --create-namespace --version 0.0.17 -``` - -Apply the k8sgpt-operator configuration: - -``` -kubectl apply -f - << EOF -apiVersion: core.k8sgpt.ai/v1alpha1 -kind: K8sGPT -metadata: - name: k8sgpt-local-ai - namespace: default -spec: - backend: localai - baseUrl: http://local-ai.local-ai.svc.cluster.local:8080/v1 - noCache: false - model: gpt-3.5-turbo - version: v0.3.0 - enableAI: true -EOF -``` - -## Test - -Apply a broken pod: - -``` -kubectl apply -f broken-pod.yaml -``` - -## ArgoCD Deployment Example -[Deploy K8sgpt + localai with Argocd](https://github.com/tyler-harpool/gitops/tree/main/infra/k8gpt) diff --git a/examples/k8sgpt/broken-pod.yaml b/examples/k8sgpt/broken-pod.yaml deleted file mode 100644 index aa3cc81c..00000000 --- a/examples/k8sgpt/broken-pod.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: broken-pod -spec: - containers: - - name: broken-pod - image: nginx:1.27.2 - livenessProbe: - httpGet: - path: / - port: 90 - initialDelaySeconds: 3 - periodSeconds: 3 \ No newline at end of file diff --git a/examples/k8sgpt/values.yaml b/examples/k8sgpt/values.yaml deleted file mode 100644 index e0075de4..00000000 --- a/examples/k8sgpt/values.yaml +++ /dev/null @@ -1,96 +0,0 @@ -replicaCount: 1 - -deployment: - # https://quay.io/repository/go-skynet/local-ai?tab=tags - image: quay.io/go-skynet/local-ai:v1.40.0 - env: - threads: 4 - debug: "true" - context_size: 512 - galleries: '[{"name":"model-gallery", "url":"github:go-skynet/model-gallery/index.yaml"}, {"url": "github:go-skynet/model-gallery/huggingface.yaml","name":"huggingface"}]' - preload_models: '[{ "id": "huggingface@thebloke__open-llama-13b-open-instruct-ggml__open-llama-13b-open-instruct.ggmlv3.q3_k_m.bin", "name": "gpt-3.5-turbo", "overrides": { "f16": true, "mmap": true }}]' - modelsPath: "/models" - -resources: - {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - -# Prompt templates to include -# Note: the keys of this map will be the names of the prompt template files -promptTemplates: - {} - # ggml-gpt4all-j.tmpl: | - # The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response. - # ### Prompt: - # {{.Input}} - # ### Response: - -# Models to download at runtime -models: - # Whether to force download models even if they already exist - forceDownload: false - - # The list of URLs to download models from - # Note: the name of the file will be the name of the loaded model - list: - #- url: "https://gpt4all.io/models/ggml-gpt4all-j.bin" - # basicAuth: base64EncodedCredentials - - # Persistent storage for models and prompt templates. - # PVC and HostPath are mutually exclusive. If both are enabled, - # PVC configuration takes precedence. If neither are enabled, ephemeral - # storage is used. 
- persistence: - pvc: - enabled: false - size: 6Gi - accessModes: - - ReadWriteOnce - - annotations: {} - - # Optional - storageClass: ~ - - hostPath: - enabled: false - path: "/models" - -service: - type: ClusterIP - port: 8080 - annotations: {} - # If using an AWS load balancer, you'll need to override the default 60s load balancer idle timeout - # service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "1200" - -ingress: - enabled: false - className: "" - annotations: - {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - hosts: - - host: chart-example.local - paths: - - path: / - pathType: ImplementationSpecific - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -nodeSelector: {} - -tolerations: [] - -affinity: {} diff --git a/examples/kubernetes/deployment-intel-arc.yaml b/examples/kubernetes/deployment-intel-arc.yaml deleted file mode 100644 index f77182bd..00000000 --- a/examples/kubernetes/deployment-intel-arc.yaml +++ /dev/null @@ -1,68 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: local-ai ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: models-pvc - namespace: local-ai -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: local-ai - namespace: local-ai - labels: - app: local-ai -spec: - selector: - matchLabels: - app: local-ai - replicas: 1 - template: - metadata: - labels: - app: local-ai - name: local-ai - spec: - containers: - - args: - - phi-2 - env: - - name: DEBUG - value: "true" - name: local-ai - image: quay.io/go-skynet/local-ai:master-sycl-f32-ffmpeg-core - imagePullPolicy: Always - resources: - limits: - gpu.intel.com/i915: 1 - volumeMounts: - - name: models-volume - mountPath: /build/models - volumes: - - name: models-volume - persistentVolumeClaim: - claimName: models-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: local-ai - namespace: local-ai -spec: - selector: - app: local-ai - type: LoadBalancer - ports: - - protocol: TCP - port: 8080 - targetPort: 8080 \ No newline at end of file diff --git a/examples/kubernetes/deployment-nvidia.yaml b/examples/kubernetes/deployment-nvidia.yaml deleted file mode 100644 index c4dfb4e0..00000000 --- a/examples/kubernetes/deployment-nvidia.yaml +++ /dev/null @@ -1,69 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: local-ai ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: models-pvc - namespace: local-ai -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 50Gi ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: local-ai - namespace: local-ai - labels: - app: local-ai -spec: - selector: - matchLabels: - app: local-ai - replicas: 1 - template: - metadata: - labels: - app: local-ai - name: local-ai - spec: - runtimeClassName: "nvidia" - containers: - - args: - - phi-2 - env: - - name: DEBUG - value: "true" - name: local-ai - image: quay.io/go-skynet/local-ai:master-cublas-cuda12 - imagePullPolicy: IfNotPresent - resources: - limits: - nvidia.com/gpu: 1 - volumeMounts: - - name: models-volume - mountPath: /build/models - volumes: - - name: models-volume - persistentVolumeClaim: - claimName: models-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: local-ai - namespace: local-ai -spec: - selector: - app: local-ai - type: NodePort - ports: - - protocol: TCP - targetPort: 8080 - port: 8080 \ No newline at end of file diff --git 
a/examples/kubernetes/deployment.yaml b/examples/kubernetes/deployment.yaml deleted file mode 100644 index 601fffdb..00000000 --- a/examples/kubernetes/deployment.yaml +++ /dev/null @@ -1,65 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: local-ai ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: models-pvc - namespace: local-ai -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: local-ai - namespace: local-ai - labels: - app: local-ai -spec: - selector: - matchLabels: - app: local-ai - replicas: 1 - template: - metadata: - labels: - app: local-ai - name: local-ai - spec: - containers: - - args: - - phi-2 - env: - - name: DEBUG - value: "true" - name: local-ai - image: quay.io/go-skynet/local-ai:master-ffmpeg-core - imagePullPolicy: IfNotPresent - volumeMounts: - - name: models-volume - mountPath: /build/models - volumes: - - name: models-volume - persistentVolumeClaim: - claimName: models-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: local-ai - namespace: local-ai -spec: - selector: - app: local-ai - type: LoadBalancer - ports: - - protocol: TCP - port: 8080 - targetPort: 8080 \ No newline at end of file diff --git a/examples/langchain-chroma/.env.example b/examples/langchain-chroma/.env.example deleted file mode 100644 index 54388e31..00000000 --- a/examples/langchain-chroma/.env.example +++ /dev/null @@ -1,8 +0,0 @@ -# CPU .env docs: https://localai.io/howtos/easy-setup-docker-cpu/ -# GPU .env docs: https://localai.io/howtos/easy-setup-docker-gpu/ - -THREADS=4 -CONTEXT_SIZE=512 -MODELS_PATH=/models -DEBUG=true -# BUILD_TYPE=generic \ No newline at end of file diff --git a/examples/langchain-chroma/.gitignore b/examples/langchain-chroma/.gitignore deleted file mode 100644 index 3dc19014..00000000 --- a/examples/langchain-chroma/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -db/ -state_of_the_union.txt -models/bert -models/ggml-gpt4all-j \ No newline at end of file diff --git a/examples/langchain-chroma/README.md b/examples/langchain-chroma/README.md deleted file mode 100644 index 9fd9e312..00000000 --- a/examples/langchain-chroma/README.md +++ /dev/null @@ -1,63 +0,0 @@ -# Data query example - -This example makes use of [langchain and chroma](https://blog.langchain.dev/langchain-chroma/) to enable question answering on a set of documents. - -## Setup - -Download the models and start the API: - -```bash -# Clone LocalAI -git clone https://github.com/go-skynet/LocalAI - -cd LocalAI/examples/langchain-chroma - -wget https://huggingface.co/skeskinen/ggml/resolve/main/all-MiniLM-L6-v2/ggml-model-q4_0.bin -O models/bert -wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j - -# configure your .env -# NOTE: ensure that THREADS does not exceed your machine's CPU cores -mv .env.example .env - -# start with docker-compose -docker-compose up -d --build - -# tail the logs & wait until the build completes -docker logs -f langchain-chroma-api-1 -``` - -### Python requirements - -``` -pip install -r requirements.txt -``` - -### Create a storage - -In this step, we will create a local vector database from our document set, so that we can later ask questions about it with the LLM. - -Note: **OPENAI_API_KEY** is not required. However, the library might fail if no API key is passed, so an arbitrary string can be used.
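Before running the store step below, it can help to confirm that the embeddings endpoint responds at all. The following is a minimal sketch added for illustration, not part of the original example: it assumes LocalAI is already up on localhost:8080, that the downloaded file is served under the model name `bert`, and that the `requests` package is available.

```python
# Hypothetical sanity check: confirm the embeddings backend answers before
# building the Chroma index. Assumes LocalAI on localhost:8080 and a model
# named "bert" (the filename downloaded above).
import requests

resp = requests.post(
    "http://localhost:8080/v1/embeddings",
    json={"model": "bert", "input": "hello world"},
    timeout=120,
)
resp.raise_for_status()
# The OpenAI-compatible response carries the vector under data[0].embedding.
print("embedding dimensions:", len(resp.json()["data"][0]["embedding"]))
```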
- -```bash -export OPENAI_API_BASE=http://localhost:8080/v1 -export OPENAI_API_KEY=sk- - -wget https://raw.githubusercontent.com/hwchase17/chat-your-data/master/state_of_the_union.txt -python store.py -``` - -After it finishes, a directory "db" will be created with the vector index database. - -## Query - -We can now query the dataset. - -```bash -export OPENAI_API_BASE=http://localhost:8080/v1 -export OPENAI_API_KEY=sk- - -python query.py -# President Trump recently stated during a press conference regarding tax reform legislation that "we're getting rid of all these loopholes." He also mentioned that he wants to simplify the system further through changes such as increasing the standard deduction amount and making other adjustments aimed at reducing taxpayers' overall burden. -``` - -Keep in mind that, for now, results are hit or miss! \ No newline at end of file diff --git a/examples/langchain-chroma/docker-compose.yml b/examples/langchain-chroma/docker-compose.yml deleted file mode 100644 index 96ef540e..00000000 --- a/examples/langchain-chroma/docker-compose.yml +++ /dev/null @@ -1,15 +0,0 @@ -version: '3.6' - -services: - api: - image: quay.io/go-skynet/local-ai:latest - build: - context: ../../ - dockerfile: Dockerfile - ports: - - 8080:8080 - env_file: - - ../../.env - volumes: - - ./models:/models:cached - command: ["/usr/bin/local-ai"] diff --git a/examples/langchain-chroma/models b/examples/langchain-chroma/models deleted file mode 120000 index 1e266b1b..00000000 --- a/examples/langchain-chroma/models +++ /dev/null @@ -1 +0,0 @@ -../models \ No newline at end of file diff --git a/examples/langchain-chroma/query.py b/examples/langchain-chroma/query.py deleted file mode 100644 index 61f4c3ea..00000000 --- a/examples/langchain-chroma/query.py +++ /dev/null @@ -1,23 +0,0 @@ - -import os -from langchain.vectorstores import Chroma -from langchain.embeddings import OpenAIEmbeddings -from langchain.chat_models import ChatOpenAI -from langchain.chains import RetrievalQA -from langchain.vectorstores.base import VectorStoreRetriever - -base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1') - -# Load and process the text -embedding = OpenAIEmbeddings(model="text-embedding-ada-002", openai_api_base=base_path) -persist_directory = 'db' - -# Now we can load the persisted database from disk, and use it as normal. -llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_base=base_path) -vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding) -retriever = VectorStoreRetriever(vectorstore=vectordb) -qa = RetrievalQA.from_llm(llm=llm, retriever=retriever) - -query = "What did the president say about taxes?"
-print(qa.run(query)) - diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt deleted file mode 100644 index 6a67d98e..00000000 --- a/examples/langchain-chroma/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -langchain==0.3.3 -openai==1.52.2 -chromadb==0.5.13 -llama-index==0.11.20 \ No newline at end of file diff --git a/examples/langchain-chroma/store.py b/examples/langchain-chroma/store.py deleted file mode 100755 index a52cfe04..00000000 --- a/examples/langchain-chroma/store.py +++ /dev/null @@ -1,25 +0,0 @@ - -import os -from langchain.vectorstores import Chroma -from langchain.embeddings import OpenAIEmbeddings -from langchain.text_splitter import CharacterTextSplitter -from langchain.document_loaders import TextLoader - -base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1') - -# Load and process the text -loader = TextLoader('state_of_the_union.txt') -documents = loader.load() - -text_splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=70) -texts = text_splitter.split_documents(documents) - -# Embed and store the texts -# Supplying a persist_directory will store the embeddings on disk -persist_directory = 'db' - -embedding = OpenAIEmbeddings(model="text-embedding-ada-002", openai_api_base=base_path) -vectordb = Chroma.from_documents(documents=texts, embedding=embedding, persist_directory=persist_directory) - -vectordb.persist() -vectordb = None diff --git a/examples/langchain-huggingface/README.md b/examples/langchain-huggingface/README.md deleted file mode 100644 index 23fdcd32..00000000 --- a/examples/langchain-huggingface/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# Data query example - -An example of integration with the HuggingFace Inference API, with help from [langchaingo](https://github.com/tmc/langchaingo). - -## Setup - -Download LocalAI and start the API: - -```bash -# Clone LocalAI -git clone https://github.com/go-skynet/LocalAI - -cd LocalAI/examples/langchain-huggingface - -docker-compose up -d -``` - -Note: ensure you've set the `HUGGINGFACEHUB_API_TOKEN` environment variable; you can generate it -on the [Settings / Access Tokens](https://huggingface.co/settings/tokens) page of the HuggingFace site. - -This is an example `.env` file for LocalAI: - -```ini -MODELS_PATH=/models -CONTEXT_SIZE=512 -HUGGINGFACEHUB_API_TOKEN=hg_123456 -``` - -## Using remote models - -Now you can use any remote model available via the HuggingFace API. For example, let's enable the -[gpt2](https://huggingface.co/gpt2) model in the `gpt-3.5-turbo.yaml` config: - -```yml -name: gpt-3.5-turbo -parameters: - model: gpt2 - top_k: 80 - temperature: 0.2 - top_p: 0.7 -context_size: 1024 -backend: "langchain-huggingface" -stopwords: -- "HUMAN:" -- "GPT:" -roles: - user: " " - system: " " -template: - completion: completion - chat: gpt4all -``` - -Here you can see that the field `parameters.model` is set to `gpt2` and `backend` is set to `langchain-huggingface`.
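Besides the curl calls shown below, the same endpoint can be scripted from Python. This is a sketch under the same assumptions as the curl examples: LocalAI reachable on localhost:8080 with the `gpt-3.5-turbo` config above loaded; the `requests` dependency is an addition here, not part of the original example.

```python
# A sketch of the same completion request from Python. "gpt-3.5-turbo" is the
# LocalAI config name, which proxies to the HuggingFace gpt2 model.
import requests

resp = requests.post(
    "http://localhost:8080/v1/completions",
    json={
        "model": "gpt-3.5-turbo",
        "prompt": "A long time ago in a galaxy far, far away",
        "temperature": 0.7,
    },
    timeout=120,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["text"])
```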
- -## How to use - -```shell -# Now API is accessible at localhost:8080 -curl http://localhost:8080/v1/models -# {"object":"list","data":[{"id":"gpt-3.5-turbo","object":"model"}]} - -curl http://localhost:8080/v1/completions -H "Content-Type: application/json" -d '{ - "model": "gpt-3.5-turbo", - "prompt": "A long time ago in a galaxy far, far away", - "temperature": 0.7 -}' -``` \ No newline at end of file diff --git a/examples/langchain-huggingface/docker-compose.yml b/examples/langchain-huggingface/docker-compose.yml deleted file mode 100644 index 96ef540e..00000000 --- a/examples/langchain-huggingface/docker-compose.yml +++ /dev/null @@ -1,15 +0,0 @@ -version: '3.6' - -services: - api: - image: quay.io/go-skynet/local-ai:latest - build: - context: ../../ - dockerfile: Dockerfile - ports: - - 8080:8080 - env_file: - - ../../.env - volumes: - - ./models:/models:cached - command: ["/usr/bin/local-ai"] diff --git a/examples/langchain-huggingface/models b/examples/langchain-huggingface/models deleted file mode 120000 index 1e266b1b..00000000 --- a/examples/langchain-huggingface/models +++ /dev/null @@ -1 +0,0 @@ -../models \ No newline at end of file diff --git a/examples/langchain-python/README.md b/examples/langchain-python/README.md deleted file mode 100644 index aeff6c48..00000000 --- a/examples/langchain-python/README.md +++ /dev/null @@ -1,29 +0,0 @@ -## Langchain-python - -Langchain example from [quickstart](https://python.langchain.com/en/latest/getting_started/getting_started.html). - -To interact with langchain, you can just set the `OPENAI_API_BASE` URL and provide a token with a random string. - -See the example below: - -``` -# Clone LocalAI -git clone https://github.com/go-skynet/LocalAI - -cd LocalAI/examples/langchain-python - -# start with docker-compose -docker-compose up --pull always - -pip install langchain -pip install openai - -export OPENAI_API_BASE=http://localhost:8080 -# Note: **OPENAI_API_KEY** is not required. However the library might fail if no API_KEY is passed by, so an arbitrary string can be used. -export OPENAI_API_KEY=sk- - -python test.py -# A good company name for a company that makes colorful socks would be "Colorsocks". - -python agent.py -``` \ No newline at end of file diff --git a/examples/langchain-python/agent.py b/examples/langchain-python/agent.py deleted file mode 100644 index 11e3a5ce..00000000 --- a/examples/langchain-python/agent.py +++ /dev/null @@ -1,44 +0,0 @@ -## This is a fork/based from https://gist.github.com/wiseman/4a706428eaabf4af1002a07a114f61d6 - -from io import StringIO -import sys -import os -from typing import Dict, Optional - -from langchain.agents import load_tools -from langchain.agents import initialize_agent -from langchain.agents.tools import Tool -from langchain.llms import OpenAI - -base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1') -model_name = os.environ.get('MODEL_NAME', 'gpt-3.5-turbo') - -class PythonREPL: - """Simulates a standalone Python REPL.""" - - def __init__(self): - pass - - def run(self, command: str) -> str: - """Run command and returns anything printed.""" - old_stdout = sys.stdout - sys.stdout = mystdout = StringIO() - try: - exec(command, globals()) - sys.stdout = old_stdout - output = mystdout.getvalue() - except Exception as e: - sys.stdout = old_stdout - output = str(e) - return output - -llm = OpenAI(temperature=0.0, openai_api_base=base_path, model_name=model_name) -python_repl = Tool( - "Python REPL", - PythonREPL().run, - """A Python shell. 
Use this to execute python commands. Input should be a valid python command. - If you expect output it should be printed out.""", - ) -tools = [python_repl] -agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True) -agent.run("What is the 10th fibonacci number?") \ No newline at end of file diff --git a/examples/langchain-python/docker-compose.yaml b/examples/langchain-python/docker-compose.yaml deleted file mode 100644 index 0a023c07..00000000 --- a/examples/langchain-python/docker-compose.yaml +++ /dev/null @@ -1,27 +0,0 @@ -version: '3.6' - -services: - api: - image: quay.io/go-skynet/local-ai:latest - # As initially LocalAI will download the models defined in PRELOAD_MODELS - # you might need to tweak the healthcheck values here according to your network connection. - # Here we give a timespan of 20m to download all the required files. - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"] - interval: 1m - timeout: 20m - retries: 20 - build: - context: ../../ - dockerfile: Dockerfile - ports: - - 8080:8080 - environment: - - DEBUG=true - - MODELS_PATH=/models - # You can preload different models here as well. - # See: https://github.com/go-skynet/model-gallery - - 'PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}]' - volumes: - - ./models:/models:cached - command: ["/usr/bin/local-ai" ] \ No newline at end of file diff --git a/examples/langchain-python/test.py b/examples/langchain-python/test.py deleted file mode 100644 index a9fac351..00000000 --- a/examples/langchain-python/test.py +++ /dev/null @@ -1,6 +0,0 @@ - -from langchain.llms import OpenAI - -llm = OpenAI(temperature=0.9,model_name="gpt-3.5-turbo") -text = "What would be a good company name for a company that makes colorful socks?" -print(llm(text)) diff --git a/examples/langchain/.gitignore b/examples/langchain/.gitignore deleted file mode 100644 index 98850a54..00000000 --- a/examples/langchain/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -models/ggml-koala-13B-4bit-128g -models/ggml-gpt4all-j \ No newline at end of file diff --git a/examples/langchain/JS.Dockerfile b/examples/langchain/JS.Dockerfile deleted file mode 100644 index 29c20c3b..00000000 --- a/examples/langchain/JS.Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -FROM node:lts-alpine -COPY ./langchainjs-localai-example /app -WORKDIR /app -RUN npm install -RUN npm run build -ENTRYPOINT [ "npm", "run", "start" ] diff --git a/examples/langchain/PY.Dockerfile b/examples/langchain/PY.Dockerfile deleted file mode 100644 index 865aec60..00000000 --- a/examples/langchain/PY.Dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -FROM python:3.13-bullseye -COPY ./langchainpy-localai-example /app -WORKDIR /app -RUN pip install --no-cache-dir -r requirements.txt -ENTRYPOINT [ "python", "./full_demo.py" ] diff --git a/examples/langchain/README.md b/examples/langchain/README.md deleted file mode 100644 index e84cfec5..00000000 --- a/examples/langchain/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# langchain - -Example of using langchain, with the standard OpenAI llm module, and LocalAI. Has docker compose profiles for both the Typescript and Python versions. - -**Please Note** - This is a tech demo example at this time. ggml-gpt4all-j has pretty terrible results for most langchain applications with the settings used in this example. 
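The core wiring in both demos is simply pointing langchain's standard OpenAI module at LocalAI instead of api.openai.com. Below is a minimal Python sketch of that idea, assuming the compose stack from this example is up on localhost:8080 and serving a model named `gpt-3.5-turbo`; it uses the same pre-0.1 langchain API as the other Python examples in this tree.

```python
# Minimal sketch: route langchain's OpenAI wrapper to LocalAI. Any non-empty
# string works as the API key (these examples note an arbitrary one suffices).
import os
from langchain.llms import OpenAI

llm = OpenAI(
    temperature=0.9,
    model_name="gpt-3.5-turbo",
    openai_api_base=os.environ.get("OPENAI_API_BASE", "http://localhost:8080/v1"),
    openai_api_key=os.environ.get("OPENAI_API_KEY", "sk-xxx"),
)
print(llm("What would be a good company name for a company that makes colorful socks?"))
```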
- -## Setup - -```bash -# Clone LocalAI -git clone https://github.com/go-skynet/LocalAI - -cd LocalAI/examples/langchain - -# (optional) - Edit the example code in typescript. -# vi ./langchainjs-localai-example/index.ts - -# Download gpt4all-j to models/ -wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j - -# start with docker-compose for typescript! -docker-compose --profile ts up --build - -# or start with docker-compose for python! -docker-compose --profile py up --build -``` - -## Copyright - -Some of the example code in index.mts and full_demo.py is adapted from the langchainjs project and is Copyright (c) Harrison Chase. Used under the terms of the MIT license, as is the remainder of this code. \ No newline at end of file diff --git a/examples/langchain/docker-compose.yaml b/examples/langchain/docker-compose.yaml deleted file mode 100644 index 32564fef..00000000 --- a/examples/langchain/docker-compose.yaml +++ /dev/null @@ -1,43 +0,0 @@ -version: '3.6' - -services: - api: - image: quay.io/go-skynet/local-ai:latest - build: - context: ../../ - dockerfile: Dockerfile - ports: - - 8080:8080 - environment: - - DEBUG=true - - MODELS_PATH=/models - volumes: - - ./models:/models:cached - command: ["/usr/bin/local-ai" ] - - js: - build: - context: . - dockerfile: JS.Dockerfile - profiles: - - js - - ts - depends_on: - - "api" - environment: - - 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX' - - 'OPENAI_API_BASE=http://api:8080/v1' - - 'MODEL_NAME=gpt-3.5-turbo' #gpt-3.5-turbo' # ggml-gpt4all-j' # ggml-koala-13B-4bit-128g' - - py: - build: - context: . - dockerfile: PY.Dockerfile - profiles: - - py - depends_on: - - "api" - environment: - - 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX' - - 'OPENAI_API_BASE=http://api:8080/v1' - - 'MODEL_NAME=gpt-3.5-turbo' #gpt-3.5-turbo' # ggml-gpt4all-j' # ggml-koala-13B-4bit-128g' \ No newline at end of file diff --git a/examples/langchain/langchainjs-localai-example/.gitignore b/examples/langchain/langchainjs-localai-example/.gitignore deleted file mode 100644 index b9470778..00000000 --- a/examples/langchain/langchainjs-localai-example/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -node_modules/ -dist/ diff --git a/examples/langchain/langchainjs-localai-example/.vscode/launch.json b/examples/langchain/langchainjs-localai-example/.vscode/launch.json deleted file mode 100644 index 2ee41d63..00000000 --- a/examples/langchain/langchainjs-localai-example/.vscode/launch.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - // Use IntelliSense to learn about possible attributes. - // Hover to view descriptions of existing attributes. 
- // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 - "version": "0.2.0", - "configurations": [ - { - "type": "node", - "request": "launch", - "name": "Launch Program", - // "skipFiles": [ - // "/**" - // ], - "program": "${workspaceFolder}\\dist\\index.mjs", - "outFiles": [ - "${workspaceFolder}/**/*.js" - ] - } - ] -} \ No newline at end of file diff --git a/examples/langchain/langchainjs-localai-example/package-lock.json b/examples/langchain/langchainjs-localai-example/package-lock.json deleted file mode 100644 index 48fee285..00000000 --- a/examples/langchain/langchainjs-localai-example/package-lock.json +++ /dev/null @@ -1,3085 +0,0 @@ -{ - "name": "langchainjs-localai-example", - "version": "0.1.0", - "lockfileVersion": 2, - "requires": true, - "packages": { - "": { - "name": "langchainjs-localai-example", - "version": "0.1.0", - "license": "MIT", - "dependencies": { - "@langchain/community": "^0.0.52", - "@langchain/openai": "^0.0.28", - "langchain": "^0.1.36" - }, - "devDependencies": { - "@types/node": "^18.16.4", - "typescript": "^5.0.4" - } - }, - "node_modules/@anthropic-ai/sdk": { - "version": "0.9.1", - "resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.9.1.tgz", - "integrity": "sha512-wa1meQ2WSfoY8Uor3EdrJq0jTiZJoKoSii2ZVWRY1oN4Tlr5s59pADg9T79FTbPe1/se5c3pBeZgJL63wmuoBA==", - "dependencies": { - "@types/node": "^18.11.18", - "@types/node-fetch": "^2.6.4", - "abort-controller": "^3.0.0", - "agentkeepalive": "^4.2.1", - "digest-fetch": "^1.3.0", - "form-data-encoder": "1.7.2", - "formdata-node": "^4.3.2", - "node-fetch": "^2.6.7", - "web-streams-polyfill": "^3.2.1" - } - }, - "node_modules/@langchain/community": { - "version": "0.0.52", - "resolved": "https://registry.npmjs.org/@langchain/community/-/community-0.0.52.tgz", - "integrity": "sha512-L+IMAAaLNP7++4HhdvuVJegc8bdw8WP77Jvp98YcySFZTZWH1yasSQSlFn3jgBk+3xLBsudpTZuttKTrZ/TtVQ==", - "dependencies": { - "@langchain/core": "~0.1.60", - "@langchain/openai": "~0.0.28", - "expr-eval": "^2.0.2", - "flat": "^5.0.2", - "langsmith": "~0.1.1", - "uuid": "^9.0.0", - "zod": "^3.22.3", - "zod-to-json-schema": "^3.22.5" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@aws-crypto/sha256-js": "^5.0.0", - "@aws-sdk/client-bedrock-agent-runtime": "^3.485.0", - "@aws-sdk/client-bedrock-runtime": "^3.422.0", - "@aws-sdk/client-dynamodb": "^3.310.0", - "@aws-sdk/client-kendra": "^3.352.0", - "@aws-sdk/client-lambda": "^3.310.0", - "@aws-sdk/client-sagemaker-runtime": "^3.310.0", - "@aws-sdk/client-sfn": "^3.310.0", - "@aws-sdk/credential-provider-node": "^3.388.0", - "@azure/search-documents": "^12.0.0", - "@clickhouse/client": "^0.2.5", - "@cloudflare/ai": "*", - "@datastax/astra-db-ts": "^1.0.0", - "@elastic/elasticsearch": "^8.4.0", - "@getmetal/metal-sdk": "*", - "@getzep/zep-js": "^0.9.0", - "@gomomento/sdk": "^1.51.1", - "@gomomento/sdk-core": "^1.51.1", - "@google-ai/generativelanguage": "^0.2.1", - "@gradientai/nodejs-sdk": "^1.2.0", - "@huggingface/inference": "^2.6.4", - "@mozilla/readability": "*", - "@neondatabase/serverless": "*", - "@opensearch-project/opensearch": "*", - "@pinecone-database/pinecone": "*", - "@planetscale/database": "^1.8.0", - "@premai/prem-sdk": "^0.3.25", - "@qdrant/js-client-rest": "^1.2.0", - "@raycast/api": "^1.55.2", - "@rockset/client": "^0.9.1", - "@smithy/eventstream-codec": "^2.0.5", - "@smithy/protocol-http": "^3.0.6", - "@smithy/signature-v4": "^2.0.10", - "@smithy/util-utf8": "^2.0.0", - "@supabase/postgrest-js": "^1.1.1", - 
"@supabase/supabase-js": "^2.10.0", - "@tensorflow-models/universal-sentence-encoder": "*", - "@tensorflow/tfjs-converter": "*", - "@tensorflow/tfjs-core": "*", - "@upstash/redis": "^1.20.6", - "@upstash/vector": "^1.0.7", - "@vercel/kv": "^0.2.3", - "@vercel/postgres": "^0.5.0", - "@writerai/writer-sdk": "^0.40.2", - "@xata.io/client": "^0.28.0", - "@xenova/transformers": "^2.5.4", - "@zilliz/milvus2-sdk-node": ">=2.2.7", - "better-sqlite3": "^9.4.0", - "cassandra-driver": "^4.7.2", - "cborg": "^4.1.1", - "chromadb": "*", - "closevector-common": "0.1.3", - "closevector-node": "0.1.6", - "closevector-web": "0.1.6", - "cohere-ai": "*", - "convex": "^1.3.1", - "couchbase": "^4.3.0", - "discord.js": "^14.14.1", - "dria": "^0.0.3", - "duck-duck-scrape": "^2.2.5", - "faiss-node": "^0.5.1", - "firebase-admin": "^11.9.0 || ^12.0.0", - "google-auth-library": "^8.9.0", - "googleapis": "^126.0.1", - "hnswlib-node": "^3.0.0", - "html-to-text": "^9.0.5", - "interface-datastore": "^8.2.11", - "ioredis": "^5.3.2", - "it-all": "^3.0.4", - "jsdom": "*", - "jsonwebtoken": "^9.0.2", - "llmonitor": "^0.5.9", - "lodash": "^4.17.21", - "lunary": "^0.6.11", - "mongodb": ">=5.2.0", - "mysql2": "^3.3.3", - "neo4j-driver": "*", - "node-llama-cpp": "*", - "pg": "^8.11.0", - "pg-copy-streams": "^6.0.5", - "pickleparser": "^0.2.1", - "portkey-ai": "^0.1.11", - "redis": "*", - "replicate": "^0.18.0", - "typeorm": "^0.3.12", - "typesense": "^1.5.3", - "usearch": "^1.1.1", - "vectordb": "^0.1.4", - "voy-search": "0.6.2", - "weaviate-ts-client": "*", - "web-auth-library": "^1.0.3", - "ws": "^8.14.2" - }, - "peerDependenciesMeta": { - "@aws-crypto/sha256-js": { - "optional": true - }, - "@aws-sdk/client-bedrock-agent-runtime": { - "optional": true - }, - "@aws-sdk/client-bedrock-runtime": { - "optional": true - }, - "@aws-sdk/client-dynamodb": { - "optional": true - }, - "@aws-sdk/client-kendra": { - "optional": true - }, - "@aws-sdk/client-lambda": { - "optional": true - }, - "@aws-sdk/client-sagemaker-runtime": { - "optional": true - }, - "@aws-sdk/client-sfn": { - "optional": true - }, - "@aws-sdk/credential-provider-node": { - "optional": true - }, - "@azure/search-documents": { - "optional": true - }, - "@clickhouse/client": { - "optional": true - }, - "@cloudflare/ai": { - "optional": true - }, - "@datastax/astra-db-ts": { - "optional": true - }, - "@elastic/elasticsearch": { - "optional": true - }, - "@getmetal/metal-sdk": { - "optional": true - }, - "@getzep/zep-js": { - "optional": true - }, - "@gomomento/sdk": { - "optional": true - }, - "@gomomento/sdk-core": { - "optional": true - }, - "@google-ai/generativelanguage": { - "optional": true - }, - "@gradientai/nodejs-sdk": { - "optional": true - }, - "@huggingface/inference": { - "optional": true - }, - "@mozilla/readability": { - "optional": true - }, - "@neondatabase/serverless": { - "optional": true - }, - "@opensearch-project/opensearch": { - "optional": true - }, - "@pinecone-database/pinecone": { - "optional": true - }, - "@planetscale/database": { - "optional": true - }, - "@premai/prem-sdk": { - "optional": true - }, - "@qdrant/js-client-rest": { - "optional": true - }, - "@raycast/api": { - "optional": true - }, - "@rockset/client": { - "optional": true - }, - "@smithy/eventstream-codec": { - "optional": true - }, - "@smithy/protocol-http": { - "optional": true - }, - "@smithy/signature-v4": { - "optional": true - }, - "@smithy/util-utf8": { - "optional": true - }, - "@supabase/postgrest-js": { - "optional": true - }, - "@supabase/supabase-js": { - 
"optional": true - }, - "@tensorflow-models/universal-sentence-encoder": { - "optional": true - }, - "@tensorflow/tfjs-converter": { - "optional": true - }, - "@tensorflow/tfjs-core": { - "optional": true - }, - "@upstash/redis": { - "optional": true - }, - "@upstash/vector": { - "optional": true - }, - "@vercel/kv": { - "optional": true - }, - "@vercel/postgres": { - "optional": true - }, - "@writerai/writer-sdk": { - "optional": true - }, - "@xata.io/client": { - "optional": true - }, - "@xenova/transformers": { - "optional": true - }, - "@zilliz/milvus2-sdk-node": { - "optional": true - }, - "better-sqlite3": { - "optional": true - }, - "cassandra-driver": { - "optional": true - }, - "cborg": { - "optional": true - }, - "chromadb": { - "optional": true - }, - "closevector-common": { - "optional": true - }, - "closevector-node": { - "optional": true - }, - "closevector-web": { - "optional": true - }, - "cohere-ai": { - "optional": true - }, - "convex": { - "optional": true - }, - "couchbase": { - "optional": true - }, - "discord.js": { - "optional": true - }, - "dria": { - "optional": true - }, - "duck-duck-scrape": { - "optional": true - }, - "faiss-node": { - "optional": true - }, - "firebase-admin": { - "optional": true - }, - "google-auth-library": { - "optional": true - }, - "googleapis": { - "optional": true - }, - "hnswlib-node": { - "optional": true - }, - "html-to-text": { - "optional": true - }, - "interface-datastore": { - "optional": true - }, - "ioredis": { - "optional": true - }, - "it-all": { - "optional": true - }, - "jsdom": { - "optional": true - }, - "jsonwebtoken": { - "optional": true - }, - "llmonitor": { - "optional": true - }, - "lodash": { - "optional": true - }, - "lunary": { - "optional": true - }, - "mongodb": { - "optional": true - }, - "mysql2": { - "optional": true - }, - "neo4j-driver": { - "optional": true - }, - "node-llama-cpp": { - "optional": true - }, - "pg": { - "optional": true - }, - "pg-copy-streams": { - "optional": true - }, - "pickleparser": { - "optional": true - }, - "portkey-ai": { - "optional": true - }, - "redis": { - "optional": true - }, - "replicate": { - "optional": true - }, - "typeorm": { - "optional": true - }, - "typesense": { - "optional": true - }, - "usearch": { - "optional": true - }, - "vectordb": { - "optional": true - }, - "voy-search": { - "optional": true - }, - "weaviate-ts-client": { - "optional": true - }, - "web-auth-library": { - "optional": true - }, - "ws": { - "optional": true - } - } - }, - "node_modules/@langchain/core": { - "version": "0.1.60", - "resolved": "https://registry.npmjs.org/@langchain/core/-/core-0.1.60.tgz", - "integrity": "sha512-3EJW4ir0tFe17AakpXCgO9flSoDjFELpSQs2w/CMZ5FBlHYxo3ODgVQAZvlHy97khEVgcnvlL3EDhPE7IdNibA==", - "dependencies": { - "ansi-styles": "^5.0.0", - "camelcase": "6", - "decamelize": "1.2.0", - "js-tiktoken": "^1.0.8", - "langsmith": "~0.1.7", - "ml-distance": "^4.0.0", - "mustache": "^4.2.0", - "p-queue": "^6.6.2", - "p-retry": "4", - "uuid": "^9.0.0", - "zod": "^3.22.4", - "zod-to-json-schema": "^3.22.3" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@langchain/openai": { - "version": "0.0.28", - "resolved": "https://registry.npmjs.org/@langchain/openai/-/openai-0.0.28.tgz", - "integrity": "sha512-2s1RA3/eAnz4ahdzsMPBna9hfAqpFNlWdHiPxVGZ5yrhXsbLWWoPcF+22LCk9t0HJKtazi2GCIWc0HVXH9Abig==", - "dependencies": { - "@langchain/core": "~0.1.56", - "js-tiktoken": "^1.0.7", - "openai": "^4.32.1", - "zod": "^3.22.4", - "zod-to-json-schema": "^3.22.3" - }, - "engines": { - 
"node": ">=18" - } - }, - "node_modules/@langchain/textsplitters": { - "version": "0.0.0", - "resolved": "https://registry.npmjs.org/@langchain/textsplitters/-/textsplitters-0.0.0.tgz", - "integrity": "sha512-3hPesWomnmVeYMppEGYbyv0v/sRUugUdlFBNn9m1ueJYHAIKbvCErkWxNUH3guyKKYgJVrkvZoQxcd9faucSaw==", - "dependencies": { - "@langchain/core": "~0.1", - "js-tiktoken": "^1.0.11" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@sqltools/formatter": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/@sqltools/formatter/-/formatter-1.2.5.tgz", - "integrity": "sha512-Uy0+khmZqUrUGm5dmMqVlnvufZRSK0FbYzVgp0UMstm+F5+W2/jnEEQyc9vo1ZR/E5ZI/B1WjjoTqBqwJL6Krw==", - "optional": true, - "peer": true - }, - "node_modules/@types/node": { - "version": "18.16.4", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.16.4.tgz", - "integrity": "sha512-LUhvPmAKAbgm+p/K11IWszLZVoZDlMF4NRmqbhEzDz/CnCuehPkZXwZbBCKGJsgjnuVejotBwM7B3Scrq4EqDw==" - }, - "node_modules/@types/node-fetch": { - "version": "2.6.11", - "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.11.tgz", - "integrity": "sha512-24xFj9R5+rfQJLRyM56qh+wnVSYhyXC2tkoBndtY0U+vubqNsYXGjufB2nn8Q6gt0LrARwL6UBtMCSVCwl4B1g==", - "dependencies": { - "@types/node": "*", - "form-data": "^4.0.0" - } - }, - "node_modules/@types/retry": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", - "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==" - }, - "node_modules/@types/uuid": { - "version": "9.0.8", - "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-9.0.8.tgz", - "integrity": "sha512-jg+97EGIcY9AGHJJRaaPVgetKDsrTgbRjQ5Msgjh/DQKEFl0DtyRr/VCOyD1T2R1MNeWPK/u7JoGhlDZnKBAfA==" - }, - "node_modules/abort-controller": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", - "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", - "dependencies": { - "event-target-shim": "^5.0.0" - }, - "engines": { - "node": ">=6.5" - } - }, - "node_modules/agentkeepalive": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.5.0.tgz", - "integrity": "sha512-5GG/5IbQQpC9FpkRGsSvZI5QYeSCzlJHdpBQntCsuTOxhKD8lqKhrleg2Yi7yvMIf82Ycmmqln9U8V9qwEiJew==", - "dependencies": { - "humanize-ms": "^1.2.1" - }, - "engines": { - "node": ">= 8.0.0" - } - }, - "node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "optional": true, - "peer": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-styles": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", - "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/any-promise": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", - "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", - "optional": true, - "peer": true - }, - "node_modules/app-root-path": { - "version": "3.1.0", - 
"resolved": "https://registry.npmjs.org/app-root-path/-/app-root-path-3.1.0.tgz", - "integrity": "sha512-biN3PwB2gUtjaYy/isrU3aNWI5w+fAfvHkSvCKeQGxhmYpwKFUxudR3Yya+KqVRHBmEDYh+/lTozYCFbmzX4nA==", - "optional": true, - "peer": true, - "engines": { - "node": ">= 6.0.0" - } - }, - "node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" - }, - "node_modules/asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" - }, - "node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "optional": true, - "peer": true - }, - "node_modules/base-64": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/base-64/-/base-64-0.1.0.tgz", - "integrity": "sha512-Y5gU45svrR5tI2Vt/X9GPd3L0HNIKzGu202EjxrXMpuc2V2CiKgemAbUUsqYmZJvPtCXoUKjNZwBJzsNScUbXA==" - }, - "node_modules/base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/binary-extensions": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", - "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", - "engines": { - "node": ">=8" - } - }, - "node_modules/binary-search": { - "version": "1.3.6", - "resolved": "https://registry.npmjs.org/binary-search/-/binary-search-1.3.6.tgz", - "integrity": "sha512-nbE1WxOTTrUWIfsfZ4aHGYu5DOuNkbxGokjV6Z2kxfJK3uaAb8zNK1muzOeipoLHZjInT4Br88BHpzevc681xA==" - }, - "node_modules/brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", - "optional": true, - "peer": true, - "dependencies": { - "balanced-match": "^1.0.0" - } - }, - "node_modules/buffer": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", - "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "optional": true, - "peer": true, - "dependencies": { - "base64-js": "^1.3.1", - "ieee754": "^1.2.1" - } - }, - "node_modules/camelcase": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", - "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", - "engines": { - "node": 
">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "optional": true, - "peer": true, - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/chalk/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "optional": true, - "peer": true, - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/charenc": { - "version": "0.0.2", - "resolved": "https://registry.npmjs.org/charenc/-/charenc-0.0.2.tgz", - "integrity": "sha512-yrLQ/yVUFXkzg7EDQsPieE/53+0RlaWTs+wBrvW36cyilJ2SaDWfl4Yj7MtLTXleV9uEKefbAGUPv2/iWSooRA==", - "engines": { - "node": "*" - } - }, - "node_modules/cli-highlight": { - "version": "2.1.11", - "resolved": "https://registry.npmjs.org/cli-highlight/-/cli-highlight-2.1.11.tgz", - "integrity": "sha512-9KDcoEVwyUXrjcJNvHD0NFc/hiwe/WPVYIleQh2O1N2Zro5gWJZ/K+3DGn8w8P/F6FxOgzyC5bxDyHIgCSPhGg==", - "optional": true, - "peer": true, - "dependencies": { - "chalk": "^4.0.0", - "highlight.js": "^10.7.1", - "mz": "^2.4.0", - "parse5": "^5.1.1", - "parse5-htmlparser2-tree-adapter": "^6.0.0", - "yargs": "^16.0.0" - }, - "bin": { - "highlight": "bin/highlight" - }, - "engines": { - "node": ">=8.0.0", - "npm": ">=5.0.0" - } - }, - "node_modules/cli-highlight/node_modules/cliui": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", - "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", - "optional": true, - "peer": true, - "dependencies": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.0", - "wrap-ansi": "^7.0.0" - } - }, - "node_modules/cli-highlight/node_modules/yargs": { - "version": "16.2.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", - "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", - "optional": true, - "peer": true, - "dependencies": { - "cliui": "^7.0.2", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.0", - "y18n": "^5.0.5", - "yargs-parser": "^20.2.2" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/cli-highlight/node_modules/yargs-parser": { - "version": "20.2.9", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", - "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", - "optional": true, - "peer": true, - "engines": { - "node": ">=10" - } - }, - "node_modules/cliui": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", - "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", - "optional": true, - "peer": true, - "dependencies": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.1", - "wrap-ansi": "^7.0.0" - }, 
- "engines": { - "node": ">=12" - } - }, - "node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "optional": true, - "peer": true, - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "optional": true, - "peer": true - }, - "node_modules/combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "dependencies": { - "delayed-stream": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/commander": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz", - "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", - "engines": { - "node": ">=14" - } - }, - "node_modules/crypt": { - "version": "0.0.2", - "resolved": "https://registry.npmjs.org/crypt/-/crypt-0.0.2.tgz", - "integrity": "sha512-mCxBlsHFYh9C+HVpiEacem8FEBnMXgU9gy4zmNC+SXAZNB/1idgp/aulFJ4FgCi7GPEVbfyng092GqL2k2rmow==", - "engines": { - "node": "*" - } - }, - "node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "optional": true, - "peer": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/decamelize": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", - "integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/digest-fetch": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/digest-fetch/-/digest-fetch-1.3.0.tgz", - "integrity": "sha512-CGJuv6iKNM7QyZlM2T3sPAdZWd/p9zQiRNS9G+9COUCwzWFTs0Xp8NF5iePx7wtvhDykReiRRrSeNb4oMmB8lA==", - "dependencies": { - "base-64": "^0.1.0", - "md5": "^2.3.0" - } - }, - "node_modules/dotenv": { - "version": "16.0.3", - "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.0.3.tgz", - "integrity": "sha512-7GO6HghkA5fYG9TYnNxi14/7K9f5occMlp3zXAuSxn7CKCxt9xbNWG7yF8hTCSUchlfWSe3uLmlPfigevRItzQ==", - "optional": true, - "peer": true, - "engines": { - "node": ">=12" - } - }, - "node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "optional": true, - "peer": true 
- }, - "node_modules/escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", - "optional": true, - "peer": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/event-target-shim": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", - "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==", - "engines": { - "node": ">=6" - } - }, - "node_modules/eventemitter3": { - "version": "4.0.7", - "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", - "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" - }, - "node_modules/expr-eval": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/expr-eval/-/expr-eval-2.0.2.tgz", - "integrity": "sha512-4EMSHGOPSwAfBiibw3ndnP0AvjDWLsMvGOvWEZ2F96IGk0bIVdjQisOHxReSkE13mHcfbuCiXw+G4y0zv6N8Eg==" - }, - "node_modules/flat": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", - "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", - "bin": { - "flat": "cli.js" - } - }, - "node_modules/form-data": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", - "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.8", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/form-data-encoder": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz", - "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==" - }, - "node_modules/formdata-node": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz", - "integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==", - "dependencies": { - "node-domexception": "1.0.0", - "web-streams-polyfill": "4.0.0-beta.3" - }, - "engines": { - "node": ">= 12.20" - } - }, - "node_modules/formdata-node/node_modules/web-streams-polyfill": { - "version": "4.0.0-beta.3", - "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz", - "integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==", - "engines": { - "node": ">= 14" - } - }, - "node_modules/fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", - "optional": true, - "peer": true - }, - "node_modules/get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "optional": true, - "peer": true, - "engines": { - "node": "6.* || 8.* || >= 10.*" - } - }, - "node_modules/glob": { - "version": "8.1.0", - "resolved": 
"https://registry.npmjs.org/glob/-/glob-8.1.0.tgz", - "integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==", - "optional": true, - "peer": true, - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^5.0.1", - "once": "^1.3.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "optional": true, - "peer": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/highlight.js": { - "version": "10.7.3", - "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-10.7.3.tgz", - "integrity": "sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A==", - "optional": true, - "peer": true, - "engines": { - "node": "*" - } - }, - "node_modules/humanize-ms": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", - "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==", - "dependencies": { - "ms": "^2.0.0" - } - }, - "node_modules/ieee754": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", - "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "optional": true, - "peer": true - }, - "node_modules/inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", - "optional": true, - "peer": true, - "dependencies": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "optional": true, - "peer": true - }, - "node_modules/is-any-array": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-any-array/-/is-any-array-2.0.1.tgz", - "integrity": "sha512-UtilS7hLRu++wb/WBAw9bNuP1Eg04Ivn1vERJck8zJthEvXCBEBpGR/33u/xLKWEQf95803oalHrVDptcAvFdQ==" - }, - "node_modules/is-buffer": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" - }, - "node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "optional": true, - "peer": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/js-tiktoken": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/js-tiktoken/-/js-tiktoken-1.0.11.tgz", - "integrity": 
"sha512-PajXFLq2vx7/8jllQZ43vzNpAai/0MOVdJjW/UrNyJorNQRTjHrqdGJG/mjHVy7h9M6dW6CaG43eNLMYFkTh6w==", - "dependencies": { - "base64-js": "^1.5.1" - } - }, - "node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/jsonpointer": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/jsonpointer/-/jsonpointer-5.0.1.tgz", - "integrity": "sha512-p/nXbhSEcu3pZRdkW1OfJhpsVtW1gd4Wa1fnQc9YLiTfAjn0312eMKimbdIQzuZl9aa9xUGaRlP9T/CJE/ditQ==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/langchain": { - "version": "0.1.36", - "resolved": "https://registry.npmjs.org/langchain/-/langchain-0.1.36.tgz", - "integrity": "sha512-NTbnCL/jKWIeEI//Nm1oG8nhW3vkYWvEMr1MPotmTThTfeKfO87eV/OAzAyh6Ruy6GFs/qofRgQZGIe6XvXTNQ==", - "dependencies": { - "@anthropic-ai/sdk": "^0.9.1", - "@langchain/community": "~0.0.47", - "@langchain/core": "~0.1.60", - "@langchain/openai": "~0.0.28", - "@langchain/textsplitters": "~0.0.0", - "binary-extensions": "^2.2.0", - "js-tiktoken": "^1.0.7", - "js-yaml": "^4.1.0", - "jsonpointer": "^5.0.1", - "langchainhub": "~0.0.8", - "langsmith": "~0.1.7", - "ml-distance": "^4.0.0", - "openapi-types": "^12.1.3", - "p-retry": "4", - "uuid": "^9.0.0", - "yaml": "^2.2.1", - "zod": "^3.22.4", - "zod-to-json-schema": "^3.22.3" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@aws-sdk/client-s3": "^3.310.0", - "@aws-sdk/client-sagemaker-runtime": "^3.310.0", - "@aws-sdk/client-sfn": "^3.310.0", - "@aws-sdk/credential-provider-node": "^3.388.0", - "@azure/storage-blob": "^12.15.0", - "@gomomento/sdk": "^1.51.1", - "@gomomento/sdk-core": "^1.51.1", - "@gomomento/sdk-web": "^1.51.1", - "@google-ai/generativelanguage": "^0.2.1", - "@google-cloud/storage": "^6.10.1 || ^7.7.0", - "@mendable/firecrawl-js": "^0.0.13", - "@notionhq/client": "^2.2.10", - "@pinecone-database/pinecone": "*", - "@supabase/supabase-js": "^2.10.0", - "@vercel/kv": "^0.2.3", - "@xata.io/client": "^0.28.0", - "apify-client": "^2.7.1", - "assemblyai": "^4.0.0", - "axios": "*", - "cheerio": "^1.0.0-rc.12", - "chromadb": "*", - "convex": "^1.3.1", - "couchbase": "^4.3.0", - "d3-dsv": "^2.0.0", - "epub2": "^3.0.1", - "fast-xml-parser": "*", - "google-auth-library": "^8.9.0", - "handlebars": "^4.7.8", - "html-to-text": "^9.0.5", - "ignore": "^5.2.0", - "ioredis": "^5.3.2", - "jsdom": "*", - "mammoth": "^1.6.0", - "mongodb": ">=5.2.0", - "node-llama-cpp": "*", - "notion-to-md": "^3.1.0", - "officeparser": "^4.0.4", - "pdf-parse": "1.1.1", - "peggy": "^3.0.2", - "playwright": "^1.32.1", - "puppeteer": "^19.7.2", - "pyodide": "^0.24.1", - "redis": "^4.6.4", - "sonix-speech-recognition": "^2.1.1", - "srt-parser-2": "^1.2.3", - "typeorm": "^0.3.12", - "weaviate-ts-client": "*", - "web-auth-library": "^1.0.3", - "ws": "^8.14.2", - "youtube-transcript": "^1.0.6", - "youtubei.js": "^9.1.0" - }, - "peerDependenciesMeta": { - "@aws-sdk/client-s3": { - "optional": true - }, - "@aws-sdk/client-sagemaker-runtime": { - "optional": true - }, - "@aws-sdk/client-sfn": { - "optional": true - }, - "@aws-sdk/credential-provider-node": { - "optional": true - }, - "@azure/storage-blob": { - "optional": true - }, - "@gomomento/sdk": { - "optional": true - }, - "@gomomento/sdk-core": { - "optional": true - }, - "@gomomento/sdk-web": { 
- "optional": true - }, - "@google-ai/generativelanguage": { - "optional": true - }, - "@google-cloud/storage": { - "optional": true - }, - "@mendable/firecrawl-js": { - "optional": true - }, - "@notionhq/client": { - "optional": true - }, - "@pinecone-database/pinecone": { - "optional": true - }, - "@supabase/supabase-js": { - "optional": true - }, - "@vercel/kv": { - "optional": true - }, - "@xata.io/client": { - "optional": true - }, - "apify-client": { - "optional": true - }, - "assemblyai": { - "optional": true - }, - "axios": { - "optional": true - }, - "cheerio": { - "optional": true - }, - "chromadb": { - "optional": true - }, - "convex": { - "optional": true - }, - "couchbase": { - "optional": true - }, - "d3-dsv": { - "optional": true - }, - "epub2": { - "optional": true - }, - "faiss-node": { - "optional": true - }, - "fast-xml-parser": { - "optional": true - }, - "google-auth-library": { - "optional": true - }, - "handlebars": { - "optional": true - }, - "html-to-text": { - "optional": true - }, - "ignore": { - "optional": true - }, - "ioredis": { - "optional": true - }, - "jsdom": { - "optional": true - }, - "mammoth": { - "optional": true - }, - "mongodb": { - "optional": true - }, - "node-llama-cpp": { - "optional": true - }, - "notion-to-md": { - "optional": true - }, - "officeparser": { - "optional": true - }, - "pdf-parse": { - "optional": true - }, - "peggy": { - "optional": true - }, - "playwright": { - "optional": true - }, - "puppeteer": { - "optional": true - }, - "pyodide": { - "optional": true - }, - "redis": { - "optional": true - }, - "sonix-speech-recognition": { - "optional": true - }, - "srt-parser-2": { - "optional": true - }, - "typeorm": { - "optional": true - }, - "weaviate-ts-client": { - "optional": true - }, - "web-auth-library": { - "optional": true - }, - "ws": { - "optional": true - }, - "youtube-transcript": { - "optional": true - }, - "youtubei.js": { - "optional": true - } - } - }, - "node_modules/langchainhub": { - "version": "0.0.8", - "resolved": "https://registry.npmjs.org/langchainhub/-/langchainhub-0.0.8.tgz", - "integrity": "sha512-Woyb8YDHgqqTOZvWIbm2CaFDGfZ4NTSyXV687AG4vXEfoNo7cGQp7nhl7wL3ehenKWmNEmcxCLgOZzW8jE6lOQ==" - }, - "node_modules/langsmith": { - "version": "0.1.18", - "resolved": "https://registry.npmjs.org/langsmith/-/langsmith-0.1.18.tgz", - "integrity": "sha512-LHk0aIFAl3/iiKvUzAiM8Xdm13bRO70XERQeHCF99fL2X815Jc47nxu6m7usSuQC8sw6rirCKZbGm18cqdUEzA==", - "dependencies": { - "@types/uuid": "^9.0.1", - "commander": "^10.0.1", - "p-queue": "^6.6.2", - "p-retry": "4", - "uuid": "^9.0.0" - } - }, - "node_modules/md5": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/md5/-/md5-2.3.0.tgz", - "integrity": "sha512-T1GITYmFaKuO91vxyoQMFETst+O71VUPEU3ze5GNzDm0OWdP8v1ziTaAEPUr/3kLsY3Sftgz242A1SetQiDL7g==", - "dependencies": { - "charenc": "0.0.2", - "crypt": "0.0.2", - "is-buffer": "~1.1.6" - } - }, - "node_modules/mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - 
"node_modules/minimatch": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", - "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", - "optional": true, - "peer": true, - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/mkdirp": { - "version": "2.1.6", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-2.1.6.tgz", - "integrity": "sha512-+hEnITedc8LAtIP9u3HJDFIdcLV2vXP33sqLLIzkv1Db1zO/1OxbvYf0Y1OC/S/Qo5dxHXepofhmxL02PsKe+A==", - "optional": true, - "peer": true, - "bin": { - "mkdirp": "dist/cjs/src/bin.js" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/ml-array-mean": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/ml-array-mean/-/ml-array-mean-1.1.6.tgz", - "integrity": "sha512-MIdf7Zc8HznwIisyiJGRH9tRigg3Yf4FldW8DxKxpCCv/g5CafTw0RRu51nojVEOXuCQC7DRVVu5c7XXO/5joQ==", - "dependencies": { - "ml-array-sum": "^1.1.6" - } - }, - "node_modules/ml-array-sum": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/ml-array-sum/-/ml-array-sum-1.1.6.tgz", - "integrity": "sha512-29mAh2GwH7ZmiRnup4UyibQZB9+ZLyMShvt4cH4eTK+cL2oEMIZFnSyB3SS8MlsTh6q/w/yh48KmqLxmovN4Dw==", - "dependencies": { - "is-any-array": "^2.0.0" - } - }, - "node_modules/ml-distance": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/ml-distance/-/ml-distance-4.0.0.tgz", - "integrity": "sha512-zj7+UGZpHk3uL7n79XTfGNUjIGnhLn8xVvrxYvBHvXFxo3jq1q+/UjP311hZxnLVhbxbXCjUniThX8gozjacYA==", - "dependencies": { - "ml-array-mean": "^1.1.6", - "ml-distance-euclidean": "^2.0.0", - "ml-tree-similarity": "^1.0.0" - } - }, - "node_modules/ml-distance-euclidean": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ml-distance-euclidean/-/ml-distance-euclidean-2.0.0.tgz", - "integrity": "sha512-yC9/2o8QF0A3m/0IXqCTXCzz2pNEzvmcE/9HFKOZGnTjatvBbsn4lWYJkxENkA4Ug2fnYl7PXQxnPi21sgMy/Q==" - }, - "node_modules/ml-tree-similarity": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/ml-tree-similarity/-/ml-tree-similarity-1.0.0.tgz", - "integrity": "sha512-XJUyYqjSuUQkNQHMscr6tcjldsOoAekxADTplt40QKfwW6nd++1wHWV9AArl0Zvw/TIHgNaZZNvr8QGvE8wLRg==", - "dependencies": { - "binary-search": "^1.3.5", - "num-sort": "^2.0.0" - } - }, - "node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/mustache": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/mustache/-/mustache-4.2.0.tgz", - "integrity": "sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==", - "bin": { - "mustache": "bin/mustache" - } - }, - "node_modules/mz": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", - "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", - "optional": true, - "peer": true, - "dependencies": { - "any-promise": "^1.0.0", - "object-assign": "^4.0.1", - "thenify-all": "^1.0.0" - } - }, - "node_modules/node-domexception": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", - "integrity": 
"sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/jimmywarting" - }, - { - "type": "github", - "url": "https://paypal.me/jimmywarting" - } - ], - "engines": { - "node": ">=10.5.0" - } - }, - "node_modules/node-fetch": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", - "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", - "dependencies": { - "whatwg-url": "^5.0.0" - }, - "engines": { - "node": "4.x || >=6.0.0" - }, - "peerDependencies": { - "encoding": "^0.1.0" - }, - "peerDependenciesMeta": { - "encoding": { - "optional": true - } - } - }, - "node_modules/num-sort": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/num-sort/-/num-sort-2.1.0.tgz", - "integrity": "sha512-1MQz1Ed8z2yckoBeSfkQHHO9K1yDRxxtotKSJ9yvcTUUxSvfvzEq5GwBrjjHEpMlq/k5gvXdmJ1SbYxWtpNoVg==", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", - "optional": true, - "peer": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", - "optional": true, - "peer": true, - "dependencies": { - "wrappy": "1" - } - }, - "node_modules/openai": { - "version": "4.38.5", - "resolved": "https://registry.npmjs.org/openai/-/openai-4.38.5.tgz", - "integrity": "sha512-Ym5GJL98ZhLJJ7enBx53jjG3vwN/fsB+Ozh46nnRZZS9W1NiYqbwkJ+sXd3dkCIiWIgcyyOPL2Zr8SQAzbpj3g==", - "dependencies": { - "@types/node": "^18.11.18", - "@types/node-fetch": "^2.6.4", - "abort-controller": "^3.0.0", - "agentkeepalive": "^4.2.1", - "form-data-encoder": "1.7.2", - "formdata-node": "^4.3.2", - "node-fetch": "^2.6.7", - "web-streams-polyfill": "^3.2.1" - }, - "bin": { - "openai": "bin/cli" - } - }, - "node_modules/openapi-types": { - "version": "12.1.3", - "resolved": "https://registry.npmjs.org/openapi-types/-/openapi-types-12.1.3.tgz", - "integrity": "sha512-N4YtSYJqghVu4iek2ZUvcN/0aqH1kRDuNqzcycDxhOUpg7GdvLa2F3DgS6yBNhInhv2r/6I0Flkn7CqL8+nIcw==" - }, - "node_modules/p-finally": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", - "integrity": "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==", - "engines": { - "node": ">=4" - } - }, - "node_modules/p-queue": { - "version": "6.6.2", - "resolved": "https://registry.npmjs.org/p-queue/-/p-queue-6.6.2.tgz", - "integrity": "sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==", - "dependencies": { - "eventemitter3": "^4.0.4", - "p-timeout": "^3.2.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/p-retry": { - "version": "4.6.2", - "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", - "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", - "dependencies": { - 
"@types/retry": "0.12.0", - "retry": "^0.13.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/p-timeout": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-3.2.0.tgz", - "integrity": "sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==", - "dependencies": { - "p-finally": "^1.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/parse5": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-5.1.1.tgz", - "integrity": "sha512-ugq4DFI0Ptb+WWjAdOK16+u/nHfiIrcE+sh8kZMaM0WllQKLI9rOUq6c2b7cwPkXdzfQESqvoqK6ug7U/Yyzug==", - "optional": true, - "peer": true - }, - "node_modules/parse5-htmlparser2-tree-adapter": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz", - "integrity": "sha512-qPuWvbLgvDGilKc5BoicRovlT4MtYT6JfJyBOMDsKoiT+GiuP5qyrPCnR9HcPECIJJmZh5jRndyNThnhhb/vlA==", - "optional": true, - "peer": true, - "dependencies": { - "parse5": "^6.0.1" - } - }, - "node_modules/parse5-htmlparser2-tree-adapter/node_modules/parse5": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz", - "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==", - "optional": true, - "peer": true - }, - "node_modules/reflect-metadata": { - "version": "0.1.13", - "resolved": "https://registry.npmjs.org/reflect-metadata/-/reflect-metadata-0.1.13.tgz", - "integrity": "sha512-Ts1Y/anZELhSsjMcU605fU9RE4Oi3p5ORujwbIKXfWa+0Zxs510Qrmrce5/Jowq3cHSZSJqBjypxmHarc+vEWg==", - "optional": true, - "peer": true - }, - "node_modules/require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", - "optional": true, - "peer": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/retry": { - "version": "0.13.1", - "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", - "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", - "engines": { - "node": ">= 4" - } - }, - "node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "optional": true, - "peer": true - }, - "node_modules/sha.js": { - "version": "2.4.11", - "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", - "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", - "optional": true, - "peer": true, - "dependencies": { - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" - }, - "bin": { - "sha.js": "bin.js" - } - }, - "node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "optional": true, - "peer": true, - "dependencies": { - 
"emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "optional": true, - "peer": true, - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "optional": true, - "peer": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/thenify": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", - "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", - "optional": true, - "peer": true, - "dependencies": { - "any-promise": "^1.0.0" - } - }, - "node_modules/thenify-all": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", - "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", - "optional": true, - "peer": true, - "dependencies": { - "thenify": ">= 3.1.0 < 4" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/tr46": { - "version": "0.0.3", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", - "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" - }, - "node_modules/tslib": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.5.0.tgz", - "integrity": "sha512-336iVw3rtn2BUK7ORdIAHTyxHGRIHVReokCR3XjbckJMK7ms8FysBfhLR8IXnAgy7T0PTPNBWKiH514FOW/WSg==", - "optional": true, - "peer": true - }, - "node_modules/typeorm": { - "version": "0.3.15", - "resolved": "https://registry.npmjs.org/typeorm/-/typeorm-0.3.15.tgz", - "integrity": "sha512-R4JSw8QjDP1W+ypeRz/XrCXIqubrLSnNAzJAp9EQSQIPHTv+YmUHZis8g08lOwFpuhqL9m8jkPSz8GWEKlU/ow==", - "optional": true, - "peer": true, - "dependencies": { - "@sqltools/formatter": "^1.2.5", - "app-root-path": "^3.1.0", - "buffer": "^6.0.3", - "chalk": "^4.1.2", - "cli-highlight": "^2.1.11", - "debug": "^4.3.4", - "dotenv": "^16.0.3", - "glob": "^8.1.0", - "mkdirp": "^2.1.3", - "reflect-metadata": "^0.1.13", - "sha.js": "^2.4.11", - "tslib": "^2.5.0", - "uuid": "^9.0.0", - "yargs": "^17.6.2" - }, - "bin": { - "typeorm": "cli.js", - "typeorm-ts-node-commonjs": "cli-ts-node-commonjs.js", - "typeorm-ts-node-esm": "cli-ts-node-esm.js" - }, - "engines": { - "node": ">= 12.9.0" - }, - "funding": { - "url": "https://opencollective.com/typeorm" - }, - "peerDependencies": { - "@google-cloud/spanner": "^5.18.0", - "@sap/hana-client": "^2.12.25", - "better-sqlite3": "^7.1.2 || ^8.0.0", - "hdb-pool": "^0.1.6", - "ioredis": "^5.0.4", - "mongodb": "^5.2.0", - "mssql": "^9.1.1", - "mysql2": "^2.2.5 || ^3.0.1", - "oracledb": "^5.1.0", - "pg": "^8.5.1", - "pg-native": "^3.0.0", - "pg-query-stream": "^4.0.0", - "redis": "^3.1.1 || ^4.0.0", - "sql.js": "^1.4.0", - "sqlite3": "^5.0.3", - "ts-node": "^10.7.0", - "typeorm-aurora-data-api-driver": "^2.0.0" - }, - "peerDependenciesMeta": { - "@google-cloud/spanner": { - 
"optional": true - }, - "@sap/hana-client": { - "optional": true - }, - "better-sqlite3": { - "optional": true - }, - "hdb-pool": { - "optional": true - }, - "ioredis": { - "optional": true - }, - "mongodb": { - "optional": true - }, - "mssql": { - "optional": true - }, - "mysql2": { - "optional": true - }, - "oracledb": { - "optional": true - }, - "pg": { - "optional": true - }, - "pg-native": { - "optional": true - }, - "pg-query-stream": { - "optional": true - }, - "redis": { - "optional": true - }, - "sql.js": { - "optional": true - }, - "sqlite3": { - "optional": true - }, - "ts-node": { - "optional": true - }, - "typeorm-aurora-data-api-driver": { - "optional": true - } - } - }, - "node_modules/typescript": { - "version": "5.0.4", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.0.4.tgz", - "integrity": "sha512-cW9T5W9xY37cc+jfEnaUvX91foxtHkza3Nw3wkoF4sSlKn0MONdkdEndig/qPBWXNkmplh3NzayQzCiHM4/hqw==", - "dev": true, - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - "node": ">=12.20" - } - }, - "node_modules/uuid": { - "version": "9.0.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.0.tgz", - "integrity": "sha512-MXcSTerfPa4uqyzStbRoTgt5XIe3x5+42+q1sDuy3R5MDk66URdLMOZe5aPX/SQd+kuYAh0FdP/pO28IkQyTeg==", - "bin": { - "uuid": "dist/bin/uuid" - } - }, - "node_modules/web-streams-polyfill": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", - "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==", - "engines": { - "node": ">= 8" - } - }, - "node_modules/webidl-conversions": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", - "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" - }, - "node_modules/whatwg-url": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", - "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", - "dependencies": { - "tr46": "~0.0.3", - "webidl-conversions": "^3.0.0" - } - }, - "node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "optional": true, - "peer": true, - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/wrap-ansi/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "optional": true, - "peer": true, - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", - "optional": true, - "peer": true - }, - "node_modules/y18n": { - "version": "5.0.8", - 
"resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", - "optional": true, - "peer": true, - "engines": { - "node": ">=10" - } - }, - "node_modules/yaml": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.2.2.tgz", - "integrity": "sha512-CBKFWExMn46Foo4cldiChEzn7S7SRV+wqiluAb6xmueD/fGyRHIhX8m14vVGgeFWjN540nKCNVj6P21eQjgTuA==", - "engines": { - "node": ">= 14" - } - }, - "node_modules/yargs": { - "version": "17.7.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", - "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", - "optional": true, - "peer": true, - "dependencies": { - "cliui": "^8.0.1", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.3", - "y18n": "^5.0.5", - "yargs-parser": "^21.1.1" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/yargs-parser": { - "version": "21.1.1", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", - "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", - "optional": true, - "peer": true, - "engines": { - "node": ">=12" - } - }, - "node_modules/zod": { - "version": "3.23.4", - "resolved": "https://registry.npmjs.org/zod/-/zod-3.23.4.tgz", - "integrity": "sha512-/AtWOKbBgjzEYYQRNfoGKHObgfAZag6qUJX1VbHo2PRBgS+wfWagEY2mizjfyAPcGesrJOcx/wcl0L9WnVrHFw==", - "funding": { - "url": "https://github.com/sponsors/colinhacks" - } - }, - "node_modules/zod-to-json-schema": { - "version": "3.23.0", - "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.23.0.tgz", - "integrity": "sha512-az0uJ243PxsRIa2x1WmNE/pnuA05gUq/JB8Lwe1EDCCL/Fz9MgjYQ0fPlyc2Tcv6aF2ZA7WM5TWaRZVEFaAIag==", - "peerDependencies": { - "zod": "^3.23.3" - } - } - }, - "dependencies": { - "@anthropic-ai/sdk": { - "version": "0.9.1", - "resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.9.1.tgz", - "integrity": "sha512-wa1meQ2WSfoY8Uor3EdrJq0jTiZJoKoSii2ZVWRY1oN4Tlr5s59pADg9T79FTbPe1/se5c3pBeZgJL63wmuoBA==", - "requires": { - "@types/node": "^18.11.18", - "@types/node-fetch": "^2.6.4", - "abort-controller": "^3.0.0", - "agentkeepalive": "^4.2.1", - "digest-fetch": "^1.3.0", - "form-data-encoder": "1.7.2", - "formdata-node": "^4.3.2", - "node-fetch": "^2.6.7", - "web-streams-polyfill": "^3.2.1" - } - }, - "@langchain/community": { - "version": "0.0.52", - "resolved": "https://registry.npmjs.org/@langchain/community/-/community-0.0.52.tgz", - "integrity": "sha512-L+IMAAaLNP7++4HhdvuVJegc8bdw8WP77Jvp98YcySFZTZWH1yasSQSlFn3jgBk+3xLBsudpTZuttKTrZ/TtVQ==", - "requires": { - "@langchain/core": "0.1.5", - "@langchain/openai": "~0.0.28", - "expr-eval": "^2.0.2", - "flat": "^5.0.2", - "langsmith": "~0.1.1", - "uuid": "^9.0.0", - "zod": "^3.22.3", - "zod-to-json-schema": "^3.22.5" - } - }, - "@langchain/core": { - "version": "0.1.60", - "resolved": "https://registry.npmjs.org/@langchain/core/-/core-0.1.60.tgz", - "integrity": "sha512-3EJW4ir0tFe17AakpXCgO9flSoDjFELpSQs2w/CMZ5FBlHYxo3ODgVQAZvlHy97khEVgcnvlL3EDhPE7IdNibA==", - "requires": { - "ansi-styles": "^5.0.0", - "camelcase": "6", - "decamelize": "1.2.0", - "js-tiktoken": "^1.0.8", - "langsmith": "~0.1.7", - "ml-distance": "^4.0.0", - "mustache": "^4.2.0", - "p-queue": "^6.6.2", - "p-retry": "4", - "uuid": "^9.0.0", - "zod": "^3.22.4", 
- "zod-to-json-schema": "^3.22.3" - } - }, - "@langchain/openai": { - "version": "0.0.28", - "resolved": "https://registry.npmjs.org/@langchain/openai/-/openai-0.0.28.tgz", - "integrity": "sha512-2s1RA3/eAnz4ahdzsMPBna9hfAqpFNlWdHiPxVGZ5yrhXsbLWWoPcF+22LCk9t0HJKtazi2GCIWc0HVXH9Abig==", - "requires": { - "@langchain/core": "0.1.5", - "js-tiktoken": "^1.0.7", - "openai": "^4.32.1", - "zod": "^3.22.4", - "zod-to-json-schema": "^3.22.3" - } - }, - "@langchain/textsplitters": { - "version": "0.0.0", - "resolved": "https://registry.npmjs.org/@langchain/textsplitters/-/textsplitters-0.0.0.tgz", - "integrity": "sha512-3hPesWomnmVeYMppEGYbyv0v/sRUugUdlFBNn9m1ueJYHAIKbvCErkWxNUH3guyKKYgJVrkvZoQxcd9faucSaw==", - "requires": { - "@langchain/core": "~0.1", - "js-tiktoken": "^1.0.11" - } - }, - "@sqltools/formatter": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/@sqltools/formatter/-/formatter-1.2.5.tgz", - "integrity": "sha512-Uy0+khmZqUrUGm5dmMqVlnvufZRSK0FbYzVgp0UMstm+F5+W2/jnEEQyc9vo1ZR/E5ZI/B1WjjoTqBqwJL6Krw==", - "optional": true, - "peer": true - }, - "@types/node": { - "version": "18.16.4", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.16.4.tgz", - "integrity": "sha512-LUhvPmAKAbgm+p/K11IWszLZVoZDlMF4NRmqbhEzDz/CnCuehPkZXwZbBCKGJsgjnuVejotBwM7B3Scrq4EqDw==" - }, - "@types/node-fetch": { - "version": "2.6.11", - "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.11.tgz", - "integrity": "sha512-24xFj9R5+rfQJLRyM56qh+wnVSYhyXC2tkoBndtY0U+vubqNsYXGjufB2nn8Q6gt0LrARwL6UBtMCSVCwl4B1g==", - "requires": { - "@types/node": "*", - "form-data": "^4.0.0" - } - }, - "@types/retry": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", - "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==" - }, - "@types/uuid": { - "version": "9.0.8", - "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-9.0.8.tgz", - "integrity": "sha512-jg+97EGIcY9AGHJJRaaPVgetKDsrTgbRjQ5Msgjh/DQKEFl0DtyRr/VCOyD1T2R1MNeWPK/u7JoGhlDZnKBAfA==" - }, - "abort-controller": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", - "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", - "requires": { - "event-target-shim": "^5.0.0" - } - }, - "agentkeepalive": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.5.0.tgz", - "integrity": "sha512-5GG/5IbQQpC9FpkRGsSvZI5QYeSCzlJHdpBQntCsuTOxhKD8lqKhrleg2Yi7yvMIf82Ycmmqln9U8V9qwEiJew==", - "requires": { - "humanize-ms": "^1.2.1" - } - }, - "ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "optional": true, - "peer": true - }, - "ansi-styles": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", - "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==" - }, - "any-promise": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", - "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", - "optional": true, - "peer": true - }, - "app-root-path": { - "version": "3.1.0", - "resolved": 
"https://registry.npmjs.org/app-root-path/-/app-root-path-3.1.0.tgz", - "integrity": "sha512-biN3PwB2gUtjaYy/isrU3aNWI5w+fAfvHkSvCKeQGxhmYpwKFUxudR3Yya+KqVRHBmEDYh+/lTozYCFbmzX4nA==", - "optional": true, - "peer": true - }, - "argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" - }, - "asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" - }, - "balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "optional": true, - "peer": true - }, - "base-64": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/base-64/-/base-64-0.1.0.tgz", - "integrity": "sha512-Y5gU45svrR5tI2Vt/X9GPd3L0HNIKzGu202EjxrXMpuc2V2CiKgemAbUUsqYmZJvPtCXoUKjNZwBJzsNScUbXA==" - }, - "base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==" - }, - "binary-extensions": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", - "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==" - }, - "binary-search": { - "version": "1.3.6", - "resolved": "https://registry.npmjs.org/binary-search/-/binary-search-1.3.6.tgz", - "integrity": "sha512-nbE1WxOTTrUWIfsfZ4aHGYu5DOuNkbxGokjV6Z2kxfJK3uaAb8zNK1muzOeipoLHZjInT4Br88BHpzevc681xA==" - }, - "brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", - "optional": true, - "peer": true, - "requires": { - "balanced-match": "^1.0.0" - } - }, - "buffer": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", - "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", - "optional": true, - "peer": true, - "requires": { - "base64-js": "^1.3.1", - "ieee754": "^1.2.1" - } - }, - "camelcase": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", - "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==" - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "optional": true, - "peer": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "optional": true, - "peer": true, - "requires": { - "color-convert": "^2.0.1" - } - } - } - }, - "charenc": { - "version": "0.0.2", - 
"resolved": "https://registry.npmjs.org/charenc/-/charenc-0.0.2.tgz", - "integrity": "sha512-yrLQ/yVUFXkzg7EDQsPieE/53+0RlaWTs+wBrvW36cyilJ2SaDWfl4Yj7MtLTXleV9uEKefbAGUPv2/iWSooRA==" - }, - "cli-highlight": { - "version": "2.1.11", - "resolved": "https://registry.npmjs.org/cli-highlight/-/cli-highlight-2.1.11.tgz", - "integrity": "sha512-9KDcoEVwyUXrjcJNvHD0NFc/hiwe/WPVYIleQh2O1N2Zro5gWJZ/K+3DGn8w8P/F6FxOgzyC5bxDyHIgCSPhGg==", - "optional": true, - "peer": true, - "requires": { - "chalk": "^4.0.0", - "highlight.js": "^10.7.1", - "mz": "^2.4.0", - "parse5": "^5.1.1", - "parse5-htmlparser2-tree-adapter": "^6.0.0", - "yargs": "^16.0.0" - }, - "dependencies": { - "cliui": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", - "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", - "optional": true, - "peer": true, - "requires": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.0", - "wrap-ansi": "^7.0.0" - } - }, - "yargs": { - "version": "16.2.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", - "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", - "optional": true, - "peer": true, - "requires": { - "cliui": "^7.0.2", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.0", - "y18n": "^5.0.5", - "yargs-parser": "^20.2.2" - } - }, - "yargs-parser": { - "version": "20.2.9", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", - "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", - "optional": true, - "peer": true - } - } - }, - "cliui": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", - "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", - "optional": true, - "peer": true, - "requires": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.1", - "wrap-ansi": "^7.0.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "optional": true, - "peer": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "optional": true, - "peer": true - }, - "combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "requires": { - "delayed-stream": "~1.0.0" - } - }, - "commander": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz", - "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==" - }, - "crypt": { - "version": "0.0.2", - "resolved": "https://registry.npmjs.org/crypt/-/crypt-0.0.2.tgz", - "integrity": "sha512-mCxBlsHFYh9C+HVpiEacem8FEBnMXgU9gy4zmNC+SXAZNB/1idgp/aulFJ4FgCi7GPEVbfyng092GqL2k2rmow==" - }, - "debug": { - "version": "4.3.4", - "resolved": 
"https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "optional": true, - "peer": true, - "requires": { - "ms": "2.1.2" - } - }, - "decamelize": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", - "integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==" - }, - "delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==" - }, - "digest-fetch": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/digest-fetch/-/digest-fetch-1.3.0.tgz", - "integrity": "sha512-CGJuv6iKNM7QyZlM2T3sPAdZWd/p9zQiRNS9G+9COUCwzWFTs0Xp8NF5iePx7wtvhDykReiRRrSeNb4oMmB8lA==", - "requires": { - "base-64": "^0.1.0", - "md5": "^2.3.0" - } - }, - "dotenv": { - "version": "16.0.3", - "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.0.3.tgz", - "integrity": "sha512-7GO6HghkA5fYG9TYnNxi14/7K9f5occMlp3zXAuSxn7CKCxt9xbNWG7yF8hTCSUchlfWSe3uLmlPfigevRItzQ==", - "optional": true, - "peer": true - }, - "emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "optional": true, - "peer": true - }, - "escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", - "optional": true, - "peer": true - }, - "event-target-shim": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", - "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==" - }, - "eventemitter3": { - "version": "4.0.7", - "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", - "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" - }, - "expr-eval": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/expr-eval/-/expr-eval-2.0.2.tgz", - "integrity": "sha512-4EMSHGOPSwAfBiibw3ndnP0AvjDWLsMvGOvWEZ2F96IGk0bIVdjQisOHxReSkE13mHcfbuCiXw+G4y0zv6N8Eg==" - }, - "flat": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", - "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==" - }, - "form-data": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", - "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", - "requires": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.8", - "mime-types": "^2.1.12" - } - }, - "form-data-encoder": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz", - "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==" - }, - "formdata-node": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz", - "integrity": 
"sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==", - "requires": { - "node-domexception": "1.0.0", - "web-streams-polyfill": "4.0.0-beta.3" - }, - "dependencies": { - "web-streams-polyfill": { - "version": "4.0.0-beta.3", - "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz", - "integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==" - } - } - }, - "fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", - "optional": true, - "peer": true - }, - "get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "optional": true, - "peer": true - }, - "glob": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/glob/-/glob-8.1.0.tgz", - "integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==", - "optional": true, - "peer": true, - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^5.0.1", - "once": "^1.3.0" - } - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "optional": true, - "peer": true - }, - "highlight.js": { - "version": "10.7.3", - "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-10.7.3.tgz", - "integrity": "sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A==", - "optional": true, - "peer": true - }, - "humanize-ms": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", - "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==", - "requires": { - "ms": "^2.0.0" - } - }, - "ieee754": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", - "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", - "optional": true, - "peer": true - }, - "inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", - "optional": true, - "peer": true, - "requires": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "optional": true, - "peer": true - }, - "is-any-array": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-any-array/-/is-any-array-2.0.1.tgz", - "integrity": "sha512-UtilS7hLRu++wb/WBAw9bNuP1Eg04Ivn1vERJck8zJthEvXCBEBpGR/33u/xLKWEQf95803oalHrVDptcAvFdQ==" - }, - "is-buffer": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": 
"sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" - }, - "is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "optional": true, - "peer": true - }, - "js-tiktoken": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/js-tiktoken/-/js-tiktoken-1.0.11.tgz", - "integrity": "sha512-PajXFLq2vx7/8jllQZ43vzNpAai/0MOVdJjW/UrNyJorNQRTjHrqdGJG/mjHVy7h9M6dW6CaG43eNLMYFkTh6w==", - "requires": { - "base64-js": "^1.5.1" - } - }, - "js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "requires": { - "argparse": "^2.0.1" - } - }, - "jsonpointer": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/jsonpointer/-/jsonpointer-5.0.1.tgz", - "integrity": "sha512-p/nXbhSEcu3pZRdkW1OfJhpsVtW1gd4Wa1fnQc9YLiTfAjn0312eMKimbdIQzuZl9aa9xUGaRlP9T/CJE/ditQ==" - }, - "langchain": { - "version": "0.1.36", - "resolved": "https://registry.npmjs.org/langchain/-/langchain-0.1.36.tgz", - "integrity": "sha512-NTbnCL/jKWIeEI//Nm1oG8nhW3vkYWvEMr1MPotmTThTfeKfO87eV/OAzAyh6Ruy6GFs/qofRgQZGIe6XvXTNQ==", - "requires": { - "@anthropic-ai/sdk": "^0.9.1", - "@langchain/community": "~0.0.47", - "@langchain/core": "0.1.5", - "@langchain/openai": "~0.0.28", - "@langchain/textsplitters": "~0.0.0", - "binary-extensions": "^2.2.0", - "js-tiktoken": "^1.0.7", - "js-yaml": "^4.1.0", - "jsonpointer": "^5.0.1", - "langchainhub": "~0.0.8", - "langsmith": "~0.1.7", - "ml-distance": "^4.0.0", - "openapi-types": "^12.1.3", - "p-retry": "4", - "uuid": "^9.0.0", - "yaml": "^2.2.1", - "zod": "^3.22.4", - "zod-to-json-schema": "^3.22.3" - } - }, - "langchainhub": { - "version": "0.0.8", - "resolved": "https://registry.npmjs.org/langchainhub/-/langchainhub-0.0.8.tgz", - "integrity": "sha512-Woyb8YDHgqqTOZvWIbm2CaFDGfZ4NTSyXV687AG4vXEfoNo7cGQp7nhl7wL3ehenKWmNEmcxCLgOZzW8jE6lOQ==" - }, - "langsmith": { - "version": "0.1.18", - "resolved": "https://registry.npmjs.org/langsmith/-/langsmith-0.1.18.tgz", - "integrity": "sha512-LHk0aIFAl3/iiKvUzAiM8Xdm13bRO70XERQeHCF99fL2X815Jc47nxu6m7usSuQC8sw6rirCKZbGm18cqdUEzA==", - "requires": { - "@types/uuid": "^9.0.1", - "commander": "^10.0.1", - "p-queue": "^6.6.2", - "p-retry": "4", - "uuid": "^9.0.0" - } - }, - "md5": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/md5/-/md5-2.3.0.tgz", - "integrity": "sha512-T1GITYmFaKuO91vxyoQMFETst+O71VUPEU3ze5GNzDm0OWdP8v1ziTaAEPUr/3kLsY3Sftgz242A1SetQiDL7g==", - "requires": { - "charenc": "0.0.2", - "crypt": "0.0.2", - "is-buffer": "~1.1.6" - } - }, - "mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==" - }, - "mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "requires": { - "mime-db": "1.52.0" - } - }, - "minimatch": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", - "integrity": 
"sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", - "optional": true, - "peer": true, - "requires": { - "brace-expansion": "^2.0.1" - } - }, - "mkdirp": { - "version": "2.1.6", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-2.1.6.tgz", - "integrity": "sha512-+hEnITedc8LAtIP9u3HJDFIdcLV2vXP33sqLLIzkv1Db1zO/1OxbvYf0Y1OC/S/Qo5dxHXepofhmxL02PsKe+A==", - "optional": true, - "peer": true - }, - "ml-array-mean": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/ml-array-mean/-/ml-array-mean-1.1.6.tgz", - "integrity": "sha512-MIdf7Zc8HznwIisyiJGRH9tRigg3Yf4FldW8DxKxpCCv/g5CafTw0RRu51nojVEOXuCQC7DRVVu5c7XXO/5joQ==", - "requires": { - "ml-array-sum": "^1.1.6" - } - }, - "ml-array-sum": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/ml-array-sum/-/ml-array-sum-1.1.6.tgz", - "integrity": "sha512-29mAh2GwH7ZmiRnup4UyibQZB9+ZLyMShvt4cH4eTK+cL2oEMIZFnSyB3SS8MlsTh6q/w/yh48KmqLxmovN4Dw==", - "requires": { - "is-any-array": "^2.0.0" - } - }, - "ml-distance": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/ml-distance/-/ml-distance-4.0.0.tgz", - "integrity": "sha512-zj7+UGZpHk3uL7n79XTfGNUjIGnhLn8xVvrxYvBHvXFxo3jq1q+/UjP311hZxnLVhbxbXCjUniThX8gozjacYA==", - "requires": { - "ml-array-mean": "^1.1.6", - "ml-distance-euclidean": "^2.0.0", - "ml-tree-similarity": "^1.0.0" - } - }, - "ml-distance-euclidean": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ml-distance-euclidean/-/ml-distance-euclidean-2.0.0.tgz", - "integrity": "sha512-yC9/2o8QF0A3m/0IXqCTXCzz2pNEzvmcE/9HFKOZGnTjatvBbsn4lWYJkxENkA4Ug2fnYl7PXQxnPi21sgMy/Q==" - }, - "ml-tree-similarity": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/ml-tree-similarity/-/ml-tree-similarity-1.0.0.tgz", - "integrity": "sha512-XJUyYqjSuUQkNQHMscr6tcjldsOoAekxADTplt40QKfwW6nd++1wHWV9AArl0Zvw/TIHgNaZZNvr8QGvE8wLRg==", - "requires": { - "binary-search": "^1.3.5", - "num-sort": "^2.0.0" - } - }, - "ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "mustache": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/mustache/-/mustache-4.2.0.tgz", - "integrity": "sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==" - }, - "mz": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", - "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", - "optional": true, - "peer": true, - "requires": { - "any-promise": "^1.0.0", - "object-assign": "^4.0.1", - "thenify-all": "^1.0.0" - } - }, - "node-domexception": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", - "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==" - }, - "node-fetch": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", - "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", - "requires": { - "whatwg-url": "^5.0.0" - } - }, - "num-sort": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/num-sort/-/num-sort-2.1.0.tgz", - "integrity": "sha512-1MQz1Ed8z2yckoBeSfkQHHO9K1yDRxxtotKSJ9yvcTUUxSvfvzEq5GwBrjjHEpMlq/k5gvXdmJ1SbYxWtpNoVg==" - 
}, - "object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", - "optional": true, - "peer": true - }, - "once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", - "optional": true, - "peer": true, - "requires": { - "wrappy": "1" - } - }, - "openai": { - "version": "4.38.5", - "resolved": "https://registry.npmjs.org/openai/-/openai-4.38.5.tgz", - "integrity": "sha512-Ym5GJL98ZhLJJ7enBx53jjG3vwN/fsB+Ozh46nnRZZS9W1NiYqbwkJ+sXd3dkCIiWIgcyyOPL2Zr8SQAzbpj3g==", - "requires": { - "@types/node": "^18.11.18", - "@types/node-fetch": "^2.6.4", - "abort-controller": "^3.0.0", - "agentkeepalive": "^4.2.1", - "form-data-encoder": "1.7.2", - "formdata-node": "^4.3.2", - "node-fetch": "^2.6.7", - "web-streams-polyfill": "^3.2.1" - } - }, - "openapi-types": { - "version": "12.1.3", - "resolved": "https://registry.npmjs.org/openapi-types/-/openapi-types-12.1.3.tgz", - "integrity": "sha512-N4YtSYJqghVu4iek2ZUvcN/0aqH1kRDuNqzcycDxhOUpg7GdvLa2F3DgS6yBNhInhv2r/6I0Flkn7CqL8+nIcw==" - }, - "p-finally": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", - "integrity": "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==" - }, - "p-queue": { - "version": "6.6.2", - "resolved": "https://registry.npmjs.org/p-queue/-/p-queue-6.6.2.tgz", - "integrity": "sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==", - "requires": { - "eventemitter3": "^4.0.4", - "p-timeout": "^3.2.0" - } - }, - "p-retry": { - "version": "4.6.2", - "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", - "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", - "requires": { - "@types/retry": "0.12.0", - "retry": "^0.13.1" - } - }, - "p-timeout": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-3.2.0.tgz", - "integrity": "sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==", - "requires": { - "p-finally": "^1.0.0" - } - }, - "parse5": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-5.1.1.tgz", - "integrity": "sha512-ugq4DFI0Ptb+WWjAdOK16+u/nHfiIrcE+sh8kZMaM0WllQKLI9rOUq6c2b7cwPkXdzfQESqvoqK6ug7U/Yyzug==", - "optional": true, - "peer": true - }, - "parse5-htmlparser2-tree-adapter": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz", - "integrity": "sha512-qPuWvbLgvDGilKc5BoicRovlT4MtYT6JfJyBOMDsKoiT+GiuP5qyrPCnR9HcPECIJJmZh5jRndyNThnhhb/vlA==", - "optional": true, - "peer": true, - "requires": { - "parse5": "^6.0.1" - }, - "dependencies": { - "parse5": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz", - "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==", - "optional": true, - "peer": true - } - } - }, - "reflect-metadata": { - "version": "0.1.13", - "resolved": "https://registry.npmjs.org/reflect-metadata/-/reflect-metadata-0.1.13.tgz", - "integrity": 
"sha512-Ts1Y/anZELhSsjMcU605fU9RE4Oi3p5ORujwbIKXfWa+0Zxs510Qrmrce5/Jowq3cHSZSJqBjypxmHarc+vEWg==", - "optional": true, - "peer": true - }, - "require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", - "optional": true, - "peer": true - }, - "retry": { - "version": "0.13.1", - "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", - "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==" - }, - "safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "optional": true, - "peer": true - }, - "sha.js": { - "version": "2.4.11", - "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", - "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", - "optional": true, - "peer": true, - "requires": { - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" - } - }, - "string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "optional": true, - "peer": true, - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - } - }, - "strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "optional": true, - "peer": true, - "requires": { - "ansi-regex": "^5.0.1" - } - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "optional": true, - "peer": true, - "requires": { - "has-flag": "^4.0.0" - } - }, - "thenify": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", - "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", - "optional": true, - "peer": true, - "requires": { - "any-promise": "^1.0.0" - } - }, - "thenify-all": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", - "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", - "optional": true, - "peer": true, - "requires": { - "thenify": ">= 3.1.0 < 4" - } - }, - "tr46": { - "version": "0.0.3", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", - "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" - }, - "tslib": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.5.0.tgz", - "integrity": "sha512-336iVw3rtn2BUK7ORdIAHTyxHGRIHVReokCR3XjbckJMK7ms8FysBfhLR8IXnAgy7T0PTPNBWKiH514FOW/WSg==", - "optional": true, - "peer": true - }, - "typeorm": { - "version": "0.3.15", - "resolved": "https://registry.npmjs.org/typeorm/-/typeorm-0.3.15.tgz", - "integrity": 
"sha512-R4JSw8QjDP1W+ypeRz/XrCXIqubrLSnNAzJAp9EQSQIPHTv+YmUHZis8g08lOwFpuhqL9m8jkPSz8GWEKlU/ow==", - "optional": true, - "peer": true, - "requires": { - "@sqltools/formatter": "^1.2.5", - "app-root-path": "^3.1.0", - "buffer": "^6.0.3", - "chalk": "^4.1.2", - "cli-highlight": "^2.1.11", - "debug": "^4.3.4", - "dotenv": "^16.0.3", - "glob": "^8.1.0", - "mkdirp": "^2.1.3", - "reflect-metadata": "^0.1.13", - "sha.js": "^2.4.11", - "tslib": "^2.5.0", - "uuid": "^9.0.0", - "yargs": "^17.6.2" - } - }, - "typescript": { - "version": "5.0.4", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.0.4.tgz", - "integrity": "sha512-cW9T5W9xY37cc+jfEnaUvX91foxtHkza3Nw3wkoF4sSlKn0MONdkdEndig/qPBWXNkmplh3NzayQzCiHM4/hqw==", - "dev": true - }, - "uuid": { - "version": "9.0.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.0.tgz", - "integrity": "sha512-MXcSTerfPa4uqyzStbRoTgt5XIe3x5+42+q1sDuy3R5MDk66URdLMOZe5aPX/SQd+kuYAh0FdP/pO28IkQyTeg==" - }, - "web-streams-polyfill": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", - "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==" - }, - "webidl-conversions": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", - "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" - }, - "whatwg-url": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", - "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", - "requires": { - "tr46": "~0.0.3", - "webidl-conversions": "^3.0.0" - } - }, - "wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "optional": true, - "peer": true, - "requires": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "optional": true, - "peer": true, - "requires": { - "color-convert": "^2.0.1" - } - } - } - }, - "wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", - "optional": true, - "peer": true - }, - "y18n": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", - "optional": true, - "peer": true - }, - "yaml": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.2.2.tgz", - "integrity": "sha512-CBKFWExMn46Foo4cldiChEzn7S7SRV+wqiluAb6xmueD/fGyRHIhX8m14vVGgeFWjN540nKCNVj6P21eQjgTuA==" - }, - "yargs": { - "version": "17.7.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", - "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", - "optional": true, - "peer": true, - "requires": { - "cliui": "^8.0.1", - 
"escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.3", - "y18n": "^5.0.5", - "yargs-parser": "^21.1.1" - } - }, - "yargs-parser": { - "version": "21.1.1", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", - "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", - "optional": true, - "peer": true - }, - "zod": { - "version": "3.23.4", - "resolved": "https://registry.npmjs.org/zod/-/zod-3.23.4.tgz", - "integrity": "sha512-/AtWOKbBgjzEYYQRNfoGKHObgfAZag6qUJX1VbHo2PRBgS+wfWagEY2mizjfyAPcGesrJOcx/wcl0L9WnVrHFw==" - }, - "zod-to-json-schema": { - "version": "3.23.0", - "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.23.0.tgz", - "integrity": "sha512-az0uJ243PxsRIa2x1WmNE/pnuA05gUq/JB8Lwe1EDCCL/Fz9MgjYQ0fPlyc2Tcv6aF2ZA7WM5TWaRZVEFaAIag==", - "requires": {} - } - } -} diff --git a/examples/langchain/langchainjs-localai-example/package.json b/examples/langchain/langchainjs-localai-example/package.json deleted file mode 100644 index 1d65575d..00000000 --- a/examples/langchain/langchainjs-localai-example/package.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "name": "langchainjs-localai-example", - "version": "0.1.1", - "description": "Trivial Example of using langchain + the OpenAI API + LocalAI together", - "main": "index.mjs", - "scripts": { - "build": "tsc --build", - "clean": "tsc --build --clean", - "start": "node --trace-warnings dist/index.mjs" - }, - "author": "dave@gray101.com", - "license": "MIT", - "devDependencies": { - "@types/node": "^18.16.4", - "typescript": "^5.0.4" - }, - "dependencies": { - "@langchain/community": "^0.0.52", - "@langchain/openai": "^0.0.28", - "langchain": "^0.1.36" - }, - "overrides": { - "@langchain/core": "0.1.5" - } -} diff --git a/examples/langchain/langchainjs-localai-example/src/index.mts b/examples/langchain/langchainjs-localai-example/src/index.mts deleted file mode 100644 index 995c2832..00000000 --- a/examples/langchain/langchainjs-localai-example/src/index.mts +++ /dev/null @@ -1,92 +0,0 @@ -import { loadQAStuffChain } from "langchain/chains"; -import { Document } from "langchain/document"; -import { pull } from "langchain/hub"; -import { AgentExecutor, createOpenAIToolsAgent } from "langchain/agents"; -import {Calculator} from "@langchain/community/tools/calculator"; -import { ChatOpenAI } from "@langchain/openai"; -import type { ChatPromptTemplate } from "@langchain/core/prompts"; - -const pathToLocalAI = process.env['OPENAI_API_BASE'] || 'http://api:8080/v1'; -const fakeApiKey = process.env['OPENAI_API_KEY'] || '-'; -const modelName = process.env['MODEL_NAME'] || 'gpt-3.5-turbo'; - -function getModel(): ChatOpenAI { - return new ChatOpenAI({ - prefixMessages: [ - { - role: "system", - content: "You are a helpful assistant that answers in pirate language", - }, - ], - modelName: modelName, - maxTokens: 50, - openAIApiKey: fakeApiKey, - maxRetries: 2 - }, { - basePath: pathToLocalAI, - apiKey: fakeApiKey, - }); -} - -// Minimal example. -export const run = async () => { - const model = getModel(); - console.log(`about to model.invoke at ${new Date().toUTCString()}`); - const res = await model.invoke( - "What would be a good company name a company that makes colorful socks?" 
- ); - console.log(`${new Date().toUTCString()}`); - console.log({ res }); -}; - -await run(); - -// This example uses the `StuffDocumentsChain` -export const run2 = async () => { - const model = getModel(); - const chainA = loadQAStuffChain(model); - const docs = [ - new Document({ pageContent: "Harrison went to Harvard." }), - new Document({ pageContent: "Ankush went to Princeton." }), - ]; - const resA = await chainA.invoke({ - input_documents: docs, - question: "Where did Harrison go to college?", - }); - console.log({ resA }); -}; - -await run2(); - -// Quickly thrown together example of using tools + agents. -// This seems like it should work, but it doesn't yet. -export const toolAgentTest = async () => { - const model = getModel(); - - const prompt = await pull("hwchase17/openai-tools-agent"); - - const tools = [new Calculator()]; - - const agent = await createOpenAIToolsAgent({ - llm: model, - tools: tools, - prompt: prompt - }); - - console.log("Loaded agent."); - - const agentExecutor = new AgentExecutor({ - agent, - tools, - }); - - const input = `What is the value of (500 *2) + 350 - 13?`; - - console.log(`Executing with input "${input}"...`); - - const result = await agentExecutor.invoke({ input }); - - console.log(`Got output ${result.output}`); -} - -await toolAgentTest(); diff --git a/examples/langchain/langchainjs-localai-example/tsconfig.json b/examples/langchain/langchainjs-localai-example/tsconfig.json deleted file mode 100644 index 5e0d5a58..00000000 --- a/examples/langchain/langchainjs-localai-example/tsconfig.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "compilerOptions": { - "target": "es2022", - "lib": ["ES2022", "DOM"], - "module": "ES2022", - "moduleResolution": "node", - "strict": true, - "esModuleInterop": true, - "allowSyntheticDefaultImports": true, - "isolatedModules": true, - "outDir": "./dist", - "skipLibCheck": true - }, - "include": ["src", "test"], - "exclude": ["node_modules", "dist"] -} diff --git a/examples/langchain/langchainpy-localai-example/.vscode/launch.json b/examples/langchain/langchainpy-localai-example/.vscode/launch.json deleted file mode 100644 index e72fa799..00000000 --- a/examples/langchain/langchainpy-localai-example/.vscode/launch.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "version": "0.2.0", - "configurations": [ - { - "name": "Python: Current File", - "type": "python", - "request": "launch", - "program": "${file}", - "console": "integratedTerminal", - "redirectOutput": true, - "justMyCode": false - }, - { - "name": "Python: Attach to Port 5678", - "type": "python", - "request": "attach", - "connect": { - "host": "localhost", - "port": 5678 - }, - "justMyCode": false - } - ] -} \ No newline at end of file diff --git a/examples/langchain/langchainpy-localai-example/.vscode/settings.json b/examples/langchain/langchainpy-localai-example/.vscode/settings.json deleted file mode 100644 index 146756d1..00000000 --- a/examples/langchain/langchainpy-localai-example/.vscode/settings.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "python.defaultInterpreterPath": "${workspaceFolder}/.venv/Scripts/python" -} \ No newline at end of file diff --git a/examples/langchain/langchainpy-localai-example/full_demo.py b/examples/langchain/langchainpy-localai-example/full_demo.py deleted file mode 100644 index 52271b67..00000000 --- a/examples/langchain/langchainpy-localai-example/full_demo.py +++ /dev/null @@ -1,46 +0,0 @@ -import os -import logging - -from langchain.chat_models import ChatOpenAI -from langchain import PromptTemplate, LLMChain -from 
langchain.prompts.chat import ( - ChatPromptTemplate, - SystemMessagePromptTemplate, - AIMessagePromptTemplate, - HumanMessagePromptTemplate, -) -from langchain.schema import ( - AIMessage, - HumanMessage, - SystemMessage -) - -# This logging incantation makes it easy to see that you're actually reaching your LocalAI instance rather than OpenAI. -logging.basicConfig(level=logging.DEBUG) - -print('Langchain + LocalAI PYTHON Tests') - -base_path = os.environ.get('OPENAI_API_BASE', 'http://api:8080/v1') -key = os.environ.get('OPENAI_API_KEY', '-') -model_name = os.environ.get('MODEL_NAME', 'gpt-3.5-turbo') - - -chat = ChatOpenAI(temperature=0, openai_api_base=base_path, openai_api_key=key, model_name=model_name, max_tokens=100) - -print("Created ChatOpenAI for ", chat.model_name) - -template = "You are a helpful assistant that translates {input_language} to {output_language}. The next message will be a sentence in {input_language}. Respond ONLY with the translation in {output_language}. Do not respond in {input_language}!" -system_message_prompt = SystemMessagePromptTemplate.from_template(template) -human_template = "{text}" -human_message_prompt = HumanMessagePromptTemplate.from_template(human_template) - -chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt]) - -print("ABOUT to execute") - -# get a chat completion from the formatted messages -response = chat(chat_prompt.format_prompt(input_language="English", output_language="French", text="I love programming.").to_messages()) - -print(response) - -print("."); \ No newline at end of file diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt deleted file mode 100644 index 1a45c6ac..00000000 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ /dev/null @@ -1,33 +0,0 @@ -aiohttp==3.10.10 -aiosignal==1.3.1 -async-timeout==4.0.3 -attrs==24.2.0 -certifi==2024.8.30 -charset-normalizer==3.4.0 -colorama==0.4.6 -dataclasses-json==0.6.7 -debugpy==1.8.7 -frozenlist==1.5.0 -greenlet==3.1.1 -idna==3.10 -langchain==0.3.3 -langchain-community==0.3.3 -marshmallow==3.23.0 -marshmallow-enum==1.5.1 -multidict==6.1.0 -mypy-extensions==1.0.0 -numexpr==2.10.1 -numpy==2.1.2 -openai==1.52.2 -openapi-schema-pydantic==1.2.4 -packaging>=23.2 -pydantic==2.9.2 -PyYAML==6.0.2 -requests==2.32.3 -SQLAlchemy==2.0.36 -tenacity==8.5.0 -tqdm==4.66.6 -typing-inspect==0.9.0 -typing_extensions==4.12.2 -urllib3==2.2.3 -yarl==1.16.0 diff --git a/examples/langchain/langchainpy-localai-example/simple_demo.py b/examples/langchain/langchainpy-localai-example/simple_demo.py deleted file mode 100644 index a9fac351..00000000 --- a/examples/langchain/langchainpy-localai-example/simple_demo.py +++ /dev/null @@ -1,6 +0,0 @@ - -from langchain.llms import OpenAI - -llm = OpenAI(temperature=0.9,model_name="gpt-3.5-turbo") -text = "What would be a good company name for a company that makes colorful socks?" -print(llm(text)) diff --git a/examples/langchain/models b/examples/langchain/models deleted file mode 120000 index 1e266b1b..00000000 --- a/examples/langchain/models +++ /dev/null @@ -1 +0,0 @@ -../models \ No newline at end of file diff --git a/examples/llamaindex/README.md b/examples/llamaindex/README.md deleted file mode 100644 index 82925b58..00000000 --- a/examples/llamaindex/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# LocalAI Demonstration with Embeddings - -This demonstration shows you how to use embeddings with existing data in LocalAI. 
-We are using the `llama-index` library to facilitate the embedding and querying processes. -The `Weaviate` client is used as the vector store. - -## Getting Started - -1. Clone this repository and navigate to this directory - - ```bash - git clone git@github.com:mudler/LocalAI.git - cd LocalAI/examples/llamaindex - ``` - -2. Install LlamaIndex and the Weaviate client: `pip install llama-index>=0.9.9 weaviate-client` -3. Run the example: `python main.py` - -```none -Downloading (…)lve/main/config.json: 100%|███████████████████████████| 684/684 [00:00<00:00, 6.01MB/s] -Downloading model.safetensors: 100%|███████████████████████████████| 133M/133M [00:03<00:00, 39.5MB/s] -Downloading (…)okenizer_config.json: 100%|███████████████████████████| 366/366 [00:00<00:00, 2.79MB/s] -Downloading (…)solve/main/vocab.txt: 100%|█████████████████████████| 232k/232k [00:00<00:00, 6.00MB/s] -Downloading (…)/main/tokenizer.json: 100%|█████████████████████████| 711k/711k [00:00<00:00, 18.8MB/s] -Downloading (…)cial_tokens_map.json: 100%|███████████████████████████| 125/125 [00:00<00:00, 1.18MB/s] -LocalAI is a community-driven project that aims to make AI accessible to everyone. It was created by Ettore Di Giacinto and is focused on providing various AI-related features such as text generation with GPTs, text to audio, audio to text, image generation, and more. The project is constantly growing and evolving, with a roadmap for future improvements. Anyone is welcome to contribute, provide feedback, and submit pull requests to help make LocalAI better. -``` diff --git a/examples/llamaindex/main.py b/examples/llamaindex/main.py deleted file mode 100644 index 89d86abd..00000000 --- a/examples/llamaindex/main.py +++ /dev/null @@ -1,29 +0,0 @@ -import weaviate -from llama_index import ServiceContext, VectorStoreIndex -from llama_index.llms import LOCALAI_DEFAULTS, OpenAILike -from llama_index.vector_stores import WeaviateVectorStore - -# Weaviate vector store setup -vector_store = WeaviateVectorStore( - weaviate_client=weaviate.Client("http://weviate.default"), index_name="AIChroma" -) - -# LLM setup, served via LocalAI -llm = OpenAILike(temperature=0, model="gpt-3.5-turbo", **LOCALAI_DEFAULTS) - -# Service context setup -service_context = ServiceContext.from_defaults(llm=llm, embed_model="local") - -# Load index from stored vectors -index = VectorStoreIndex.from_vector_store( - vector_store, service_context=service_context -) - -# Query engine setup -query_engine = index.as_query_engine( - similarity_top_k=1, vector_store_query_mode="hybrid" -) - -# Query example -response = query_engine.query("What is LocalAI?") -print(response) diff --git a/examples/localai-webui/README.md b/examples/localai-webui/README.md deleted file mode 100644 index 8e36f40a..00000000 --- a/examples/localai-webui/README.md +++ /dev/null @@ -1,26 +0,0 @@ -# localai-webui - -Example of integration with [dhruvgera/localai-frontend](https://github.com/Dhruvgera/LocalAI-frontend). - -![image](https://user-images.githubusercontent.com/42107491/235344183-44b5967d-ba22-4331-804c-8da7004a5d35.png) - -## Setup - -```bash -# Clone LocalAI -git clone https://github.com/go-skynet/LocalAI - -cd LocalAI/examples/localai-webui - -# (optional) Checkout a specific LocalAI tag -# git checkout -b build - -# Download any desired models to models/ in the parent LocalAI project dir -# For example: wget https://gpt4all.io/models/ggml-gpt4all-j.bin - -# start with docker-compose -docker-compose up -d --build -``` - -Open http://localhost:3000 for the Web UI.
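Before opening the browser, it can help to confirm that the API container behind the frontend is actually serving requests. A minimal sanity check, assuming the compose file above published the API on localhost:8080 and a model file was downloaded into `models/`:

```bash
# List the models LocalAI has picked up from the models directory;
# an empty "data" array usually means no model file was in place at startup.
curl http://localhost:8080/v1/models
# Expected shape of the response (the model id depends on the file you downloaded):
# {"object":"list","data":[{"id":"ggml-gpt4all-j","object":"model"}]}
```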
- diff --git a/examples/localai-webui/docker-compose.yml b/examples/localai-webui/docker-compose.yml deleted file mode 100644 index 1609f604..00000000 --- a/examples/localai-webui/docker-compose.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: '3.6' - -services: - api: - image: quay.io/go-skynet/local-ai:latest - build: - context: . - dockerfile: Dockerfile - ports: - - 8080:8080 - volumes: - - ./models:/models:cached - command: ["/usr/bin/local-ai"] - - frontend: - image: quay.io/go-skynet/localai-frontend:master - ports: - - 3000:3000 \ No newline at end of file diff --git a/examples/models/.gitignore b/examples/models/.gitignore deleted file mode 100644 index 9237b97c..00000000 --- a/examples/models/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -# Ignore everything but predefined models -* -!.gitignore -!completion.tmpl -!embeddings.yaml -!gpt4all.tmpl -!gpt-3.5-turbo.yaml diff --git a/examples/models/completion.tmpl b/examples/models/completion.tmpl deleted file mode 100644 index 9867cfcd..00000000 --- a/examples/models/completion.tmpl +++ /dev/null @@ -1 +0,0 @@ -{{.Input}} \ No newline at end of file diff --git a/examples/models/embeddings.yaml b/examples/models/embeddings.yaml deleted file mode 100644 index 536c8de1..00000000 --- a/examples/models/embeddings.yaml +++ /dev/null @@ -1,6 +0,0 @@ -name: text-embedding-ada-002 -parameters: - model: bert -threads: 4 -backend: bert-embeddings -embeddings: true diff --git a/examples/models/gpt-3.5-turbo.yaml b/examples/models/gpt-3.5-turbo.yaml deleted file mode 100644 index 5c192f5d..00000000 --- a/examples/models/gpt-3.5-turbo.yaml +++ /dev/null @@ -1,16 +0,0 @@ -name: gpt-3.5-turbo -parameters: - model: ggml-gpt4all-j - top_k: 80 - temperature: 0.2 - top_p: 0.7 -context_size: 1024 -stopwords: -- "HUMAN:" -- "GPT:" -roles: - user: " " - system: " " -template: - completion: completion - chat: gpt4all \ No newline at end of file diff --git a/examples/models/gpt4all.tmpl b/examples/models/gpt4all.tmpl deleted file mode 100644 index f76b080a..00000000 --- a/examples/models/gpt4all.tmpl +++ /dev/null @@ -1,4 +0,0 @@ -The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response. -### Prompt: -{{.Input}} -### Response: diff --git a/examples/privateGPT/README.md b/examples/privateGPT/README.md deleted file mode 100644 index faf682c4..00000000 --- a/examples/privateGPT/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# privateGPT - -This example is a re-adaptation of https://github.com/imartinez/privateGPT to work with LocalAI and OpenAI endpoints. We have a fork with the changes required to work with privateGPT here https://github.com/go-skynet/privateGPT ( PR: https://github.com/imartinez/privateGPT/pull/408 ). - -Follow the instructions in https://github.com/go-skynet/privateGPT: - -```bash -git clone git@github.com:go-skynet/privateGPT.git -cd privateGPT -pip install -r requirements.txt -``` - -Rename `example.env` to `.env` and edit the variables appropriately. 
- -This is an example `.env` file for LocalAI: - -``` -PERSIST_DIRECTORY=db -# Set to OpenAI here -MODEL_TYPE=OpenAI -EMBEDDINGS_MODEL_NAME=all-MiniLM-L6-v2 -MODEL_N_CTX=1000 -# LocalAI URL -OPENAI_API_BASE=http://localhost:8080/v1 -``` \ No newline at end of file diff --git a/examples/query_data/.gitignore b/examples/query_data/.gitignore deleted file mode 100644 index 29ea9d56..00000000 --- a/examples/query_data/.gitignore +++ /dev/null @@ -1 +0,0 @@ -storage/ \ No newline at end of file diff --git a/examples/query_data/README.md b/examples/query_data/README.md deleted file mode 100644 index c4e384cd..00000000 --- a/examples/query_data/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# Data query example - -This example makes use of [Llama-Index](https://gpt-index.readthedocs.io/en/stable/getting_started/installation.html) to enable question answering on a set of documents. - -It loosely follows [the quickstart](https://gpt-index.readthedocs.io/en/stable/guides/primer/usage_pattern.html). - -Summary of the steps: - -- prepare the dataset (and store it in `data`) -- prepare a vector index database to run queries on -- run queries - -## Requirements - -You will need a training data set. Copy it into the `data` directory. - -## Setup - -Start the API: - -```bash -# Clone LocalAI -git clone https://github.com/go-skynet/LocalAI - -cd LocalAI/examples/query_data - -wget https://huggingface.co/skeskinen/ggml/resolve/main/all-MiniLM-L6-v2/ggml-model-q4_0.bin -O models/bert -wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j - -# start with docker-compose -docker-compose up -d --build -``` - -### Create a storage - -In this step we will create a local vector database from our document set, so later we can ask questions on it with the LLM. - -Note: **OPENAI_API_KEY** is not required. However, the library might fail if no API key is passed, so an arbitrary string can be used. - -```bash -export OPENAI_API_BASE=http://localhost:8080/v1 -export OPENAI_API_KEY=sk- - -python store.py -``` - -After it finishes, a directory "storage" will be created with the vector index database. - -## Query - -We can now query the dataset.
- -```bash -export OPENAI_API_BASE=http://localhost:8080/v1 -export OPENAI_API_KEY=sk- - -python query.py -``` - -## Update - -To update our vector database, run `update.py`: - -```bash -export OPENAI_API_BASE=http://localhost:8080/v1 -export OPENAI_API_KEY=sk- - -python update.py -``` \ No newline at end of file diff --git a/examples/query_data/data/.keep b/examples/query_data/data/.keep deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/query_data/docker-compose.yml b/examples/query_data/docker-compose.yml deleted file mode 100644 index cf76eb7f..00000000 --- a/examples/query_data/docker-compose.yml +++ /dev/null @@ -1,15 +0,0 @@ -version: '3.6' - -services: - api: - image: quay.io/go-skynet/local-ai:latest - build: - context: ../../ - dockerfile: Dockerfile - ports: - - 8080:8080 - env_file: - - .env - volumes: - - ./models:/models:cached - command: ["/usr/bin/local-ai"] diff --git a/examples/query_data/models b/examples/query_data/models deleted file mode 120000 index 1e266b1b..00000000 --- a/examples/query_data/models +++ /dev/null @@ -1 +0,0 @@ -../models \ No newline at end of file diff --git a/examples/query_data/query.py b/examples/query_data/query.py deleted file mode 100644 index 40375960..00000000 --- a/examples/query_data/query.py +++ /dev/null @@ -1,35 +0,0 @@ -import os - -# Uncomment to specify your OpenAI API key here (local testing only, not in production!), or add corresponding environment variable (recommended) -# os.environ['OPENAI_API_KEY']= "" - -from llama_index import LLMPredictor, PromptHelper, ServiceContext -from langchain.llms.openai import OpenAI -from llama_index import StorageContext, load_index_from_storage - -base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1') - -# This example uses gpt-3.5-turbo by default; feel free to change if desired -llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_base=base_path)) - -# Configure prompt parameters and initialise helper -max_input_size = 500 -num_output = 256 -max_chunk_overlap = 0.2 - -prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap) - -# Configure the service context -service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper) - -# rebuild storage context -storage_context = StorageContext.from_defaults(persist_dir='./storage') - -# load index -index = load_index_from_storage(storage_context, service_context=service_context) - -query_engine = index.as_query_engine() - -data = input("Question: ") -response = query_engine.query(data) -print(response) diff --git a/examples/query_data/store.py b/examples/query_data/store.py deleted file mode 100644 index 9aec6217..00000000 --- a/examples/query_data/store.py +++ /dev/null @@ -1,27 +0,0 @@ -import os - -# Uncomment to specify your OpenAI API key here (local testing only, not in production!), or add corresponding environment variable (recommended) -# os.environ['OPENAI_API_KEY']= "" - -from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, LLMPredictor, PromptHelper, ServiceContext -from langchain.llms.openai import OpenAI -from llama_index import StorageContext, load_index_from_storage - -base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1') - -# This example uses gpt-3.5-turbo by default; feel free to change if desired -llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_base=base_path)) - -# Configure prompt parameters and
initialise helper -max_input_size = 400 -num_output = 400 -max_chunk_overlap = 0.3 - -prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap) - -# Load documents from the 'data' directory -documents = SimpleDirectoryReader('data').load_data() -service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper, chunk_size_limit=400) -index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context) -index.storage_context.persist(persist_dir="./storage") - diff --git a/examples/query_data/update.py b/examples/query_data/update.py deleted file mode 100644 index 55130d0f..00000000 --- a/examples/query_data/update.py +++ /dev/null @@ -1,32 +0,0 @@ -import os - -# Uncomment to specify your OpenAI API key here (local testing only, not in production!), or add corresponding environment variable (recommended) -# os.environ['OPENAI_API_KEY']= "" - -from llama_index import LLMPredictor, PromptHelper, SimpleDirectoryReader, ServiceContext -from langchain.llms.openai import OpenAI -from llama_index import StorageContext, load_index_from_storage - -base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1') - -# This example uses gpt-3.5-turbo by default; feel free to change if desired -llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_base=base_path)) - -# Configure prompt parameters and initialise helper -max_input_size = 512 -num_output = 256 -max_chunk_overlap = 20 - -prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap) - -# Configure the service context -service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper) - -# rebuild storage context -storage_context = StorageContext.from_defaults(persist_dir='./storage') - -# load index -index = load_index_from_storage(storage_context, service_context=service_context) -documents = SimpleDirectoryReader('data').load_data() -index.refresh(documents) -index.storage_context.persist(persist_dir="./storage") \ No newline at end of file diff --git a/examples/rwkv/.gitignore b/examples/rwkv/.gitignore deleted file mode 100644 index ab3629c5..00000000 --- a/examples/rwkv/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -models/rwkv -models/rwkv.tokenizer.json \ No newline at end of file diff --git a/examples/rwkv/Dockerfile.build b/examples/rwkv/Dockerfile.build deleted file mode 100644 index 491f9ccd..00000000 --- a/examples/rwkv/Dockerfile.build +++ /dev/null @@ -1,12 +0,0 @@ -FROM python - -RUN apt-get update && apt-get -y install cmake - -# convert the model (one-off) -RUN pip3 install torch numpy - -WORKDIR /build -COPY ./scripts/ . - -RUN git clone --recurse-submodules https://github.com/saharNooby/rwkv.cpp && cd rwkv.cpp && cmake . && cmake --build . --config Release -ENTRYPOINT [ "/build/build.sh" ] \ No newline at end of file diff --git a/examples/rwkv/README.md b/examples/rwkv/README.md deleted file mode 100644 index 00ca5702..00000000 --- a/examples/rwkv/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# rwkv - -Example of how to run rwkv models. - -## Run models - -Setup: - -```bash -# Clone LocalAI -git clone https://github.com/go-skynet/LocalAI - -cd LocalAI/examples/rwkv - -# (optional) Checkout a specific LocalAI tag -# git checkout -b build - -# build the tooling image to convert an rwkv model locally: -docker build -t rwkv-converter -f Dockerfile.build . - -# download and convert a model (one-off) - it's going to be fast on CPU too!
-docker run -ti --name converter -v $PWD:/data rwkv-converter https://huggingface.co/BlinkDL/rwkv-4-raven/resolve/main/RWKV-4-Raven-1B5-v11-Eng99%25-Other1%25-20230425-ctx4096.pth /data/models/rwkv - -# Get the tokenizer -wget https://raw.githubusercontent.com/saharNooby/rwkv.cpp/5eb8f09c146ea8124633ab041d9ea0b1f1db4459/rwkv/20B_tokenizer.json -O models/rwkv.tokenizer.json - -# start with docker-compose -docker-compose up -d --build -``` - -Test it out: - -```bash -curl http://localhost:8080/v1/completions -H "Content-Type: application/json" -d '{ - "model": "gpt-3.5-turbo", - "prompt": "A long time ago, in a galaxy far away", - "max_tokens": 100, - "temperature": 0.9, "top_p": 0.8, "top_k": 80 - }' - -# {"object":"text_completion","model":"gpt-3.5-turbo","choices":[{"text":", there was a small group of five friends: Annie, Bryan, Charlie, Emily, and Jesse."}],"usage":{"prompt_tokens":0,"completion_tokens":0,"total_tokens":0}} - -curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{ - "model": "gpt-3.5-turbo", - "messages": [{"role": "user", "content": "How are you?"}], - "temperature": 0.9, "top_p": 0.8, "top_k": 80 - }' - -# {"object":"chat.completion","model":"gpt-3.5-turbo","choices":[{"message":{"role":"assistant","content":" Good, thanks. I am about to go to bed. I' ll talk to you later.Bye."}}],"usage":{"prompt_tokens":0,"completion_tokens":0,"total_tokens":0}} -``` - -### Fine tuning - -See [RWKV-LM](https://github.com/BlinkDL/RWKV-LM#training--fine-tuning). There is also a Google [colab](https://colab.research.google.com/github/resloved/RWKV-notebooks/blob/master/RWKV_v4_RNN_Pile_Fine_Tuning.ipynb). - -## See also - -- [RWKV-LM](https://github.com/BlinkDL/RWKV-LM) -- [rwkv.cpp](https://github.com/saharNooby/rwkv.cpp) \ No newline at end of file diff --git a/examples/rwkv/docker-compose.yaml b/examples/rwkv/docker-compose.yaml deleted file mode 100644 index 8bd61b5f..00000000 --- a/examples/rwkv/docker-compose.yaml +++ /dev/null @@ -1,16 +0,0 @@ -version: '3.6' - -services: - api: - image: quay.io/go-skynet/local-ai:latest - build: - context: ../../ - dockerfile: Dockerfile - ports: - - 8080:8080 - environment: - - DEBUG=true - - MODELS_PATH=/models - volumes: - - ./models:/models:cached - command: ["/usr/bin/local-ai" ] diff --git a/examples/rwkv/models/gpt-3.5-turbo.yaml b/examples/rwkv/models/gpt-3.5-turbo.yaml deleted file mode 100644 index 1afce1a3..00000000 --- a/examples/rwkv/models/gpt-3.5-turbo.yaml +++ /dev/null @@ -1,18 +0,0 @@ -name: gpt-3.5-turbo -parameters: - model: rwkv - top_k: 80 - temperature: 0.9 - max_tokens: 100 - top_p: 0.8 -context_size: 1024 -backend: "rwkv" -cutwords: -- "Bob:.*" -roles: - user: "Bob:" - system: "Alice:" - assistant: "Alice:" -template: - completion: rwkv_completion - chat: rwkv_chat \ No newline at end of file diff --git a/examples/rwkv/models/rwkv_chat.tmpl b/examples/rwkv/models/rwkv_chat.tmpl deleted file mode 100644 index d2c0511e..00000000 --- a/examples/rwkv/models/rwkv_chat.tmpl +++ /dev/null @@ -1,13 +0,0 @@ -The following is a verbose detailed conversation between Bob and a woman, Alice. Alice is intelligent, friendly and likeable. Alice is likely to agree with Bob. - -Bob: Hello Alice, how are you doing? - -Alice: Hi Bob! Thanks, I'm fine. What about you? - -Bob: I am very good! It's nice to see you. Would you mind me chatting with you for a while? - -Alice: Not at all! I'm listening. 
- -{{.Input}} - -Alice: \ No newline at end of file diff --git a/examples/rwkv/models/rwkv_completion.tmpl b/examples/rwkv/models/rwkv_completion.tmpl deleted file mode 100644 index 8450377f..00000000 --- a/examples/rwkv/models/rwkv_completion.tmpl +++ /dev/null @@ -1 +0,0 @@ -Complete the following sentence: {{.Input}} \ No newline at end of file diff --git a/examples/rwkv/scripts/build.sh b/examples/rwkv/scripts/build.sh deleted file mode 100755 index 37720582..00000000 --- a/examples/rwkv/scripts/build.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -set -ex - -URL=$1 -OUT=$2 -FILENAME=$(basename $URL) - -wget -nc $URL -O /build/$FILENAME - -python3 /build/rwkv.cpp/rwkv/convert_pytorch_to_ggml.py /build/$FILENAME /build/float-model float16 -python3 /build/rwkv.cpp/rwkv/quantize.py /build/float-model $OUT Q4_0 diff --git a/examples/semantic-todo/README.md b/examples/semantic-todo/README.md deleted file mode 100644 index 701346e4..00000000 --- a/examples/semantic-todo/README.md +++ /dev/null @@ -1,15 +0,0 @@ -This demonstrates the vector store backend in its simplest form. -You can add tasks and then search/sort them using the TUI. - -To build and run: - -```bash -$ go get . -$ go run . -``` - -A separate LocalAI instance is required, of course. For example: - -```bash -$ docker run -e DEBUG=true --rm -it -p 8080:8080 bert-cpp -``` diff --git a/examples/semantic-todo/go.mod b/examples/semantic-todo/go.mod deleted file mode 100644 index 56e563ec..00000000 --- a/examples/semantic-todo/go.mod +++ /dev/null @@ -1,18 +0,0 @@ -module semantic-todo - -go 1.22 - -require ( - github.com/gdamore/tcell/v2 v2.7.4 - github.com/rivo/tview v0.0.0-20240524063012-037df494fb76 -) - -require ( - github.com/gdamore/encoding v1.0.0 // indirect - github.com/lucasb-eyer/go-colorful v1.2.0 // indirect - github.com/mattn/go-runewidth v0.0.15 // indirect - github.com/rivo/uniseg v0.4.7 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/term v0.17.0 // indirect - golang.org/x/text v0.14.0 // indirect -) diff --git a/examples/semantic-todo/go.sum b/examples/semantic-todo/go.sum deleted file mode 100644 index bdd4979f..00000000 --- a/examples/semantic-todo/go.sum +++ /dev/null @@ -1,50 +0,0 @@ -github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko= -github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= -github.com/gdamore/tcell/v2 v2.7.4 h1:sg6/UnTM9jGpZU+oFYAsDahfchWAFW8Xx2yFinNSAYU= -github.com/gdamore/tcell/v2 v2.7.4/go.mod h1:dSXtXTSK0VsW1biw65DZLZ2NKr7j0qP/0J7ONmsraWg= -github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= -github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= -github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= -github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/rivo/tview v0.0.0-20240524063012-037df494fb76 h1:iqvDlgyjmqleATtFbA7c14djmPh2n4mCYUv7JlD/ruA= -github.com/rivo/tview v0.0.0-20240524063012-037df494fb76/go.mod h1:02iFIz7K/A9jGCvrizLPvoqr4cEIx7q54RH5Qudkrss= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= -github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/yuin/goldmark v1.4.13/go.mod
h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/examples/semantic-todo/main.go b/examples/semantic-todo/main.go deleted file mode 100644 index a8936ea1..00000000 --- a/examples/semantic-todo/main.go +++ /dev/null 
@@ -1,355 +0,0 @@ -package main - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - - "github.com/gdamore/tcell/v2" - "github.com/rivo/tview" -) - -const ( - localAI string = "http://localhost:8080" - rootStatus string = "[::b]<space>[::-]: Add Task [::b]/[::-]: Search Task [::b]<esc>[::-]: Exit" - inputStatus string = "Press [::b]<enter>[::-] to submit the task, [::b]<esc>[::-] to cancel" -) - -type Task struct { - Description string - Similarity float32 -} - -type AppState int - -const ( - StateRoot AppState = iota - StateInput - StateSearch -) - -type App struct { - state AppState - tasks []Task - app *tview.Application - flex *tview.Flex - table *tview.Table -} - -func NewApp() *App { - return &App{ - state: StateRoot, - tasks: []Task{ - {Description: "Take the dog for a walk (after I get a dog)"}, - {Description: "Go to the toilet"}, - {Description: "Allow TODOs to be marked completed or removed"}, - }, - } -} - -func getEmbeddings(description string) ([]float32, error) { - // Define the request payload - payload := map[string]interface{}{ - "model": "bert-cpp-minilm-v6", - "input": description, - } - - // Marshal the payload into JSON - jsonPayload, err := json.Marshal(payload) - if err != nil { - return nil, err - } - - // Make the HTTP request to the local OpenAI embeddings API - resp, err := http.Post(localAI+"/embeddings", "application/json", bytes.NewBuffer(jsonPayload)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - // Check if the request was successful - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("request to embeddings API failed with status code: %d", resp.StatusCode) - } - - // Parse the response body - var result struct { - Data []struct { - Embedding []float32 `json:"embedding"` - } `json:"data"` - } - if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { - return nil, err - } - - // Return the embedding - if len(result.Data) > 0 { - return result.Data[0].Embedding, nil - } - return nil, errors.New("no embedding received from API") -} - -type StoresSet struct { - Store string `json:"store,omitempty" yaml:"store,omitempty"` - - Keys [][]float32 `json:"keys" yaml:"keys"` - Values []string `json:"values" yaml:"values"` -} - -func postTasksToExternalService(tasks []Task) error { - keys := make([][]float32, 0, len(tasks)) - // Get the embeddings for the task description - for _, task := range tasks { - embedding, err := getEmbeddings(task.Description) - if err != nil { - return err - } - keys = append(keys, embedding) - } - - values := make([]string, 0, len(tasks)) - for _, task := range tasks { - values = append(values, task.Description) - } - - // Construct the StoresSet object - storesSet := StoresSet{ - Store: "tasks_store", // Assuming you have a specific store name - Keys: keys, - Values: values, - } - - // Marshal the StoresSet object into JSON - jsonData, err := json.Marshal(storesSet) - if err != nil { - return err - } - - // Make the HTTP POST request to the external service - resp, err := http.Post(localAI+"/stores/set", "application/json", bytes.NewBuffer(jsonData)) - if err != nil { - return err - } - defer resp.Body.Close() - - // Check if the request was successful - if resp.StatusCode != http.StatusOK { - // read resp body into string - body, err := io.ReadAll(resp.Body) - if err != nil { - return err - } - return fmt.Errorf("store request failed with status code: %d: %s", resp.StatusCode, body) - } - - return nil -} - -type StoresFind struct { - Store string `json:"store,omitempty"
yaml:"store,omitempty"` - - Key []float32 `json:"key" yaml:"key"` - Topk int `json:"topk" yaml:"topk"` -} - -type StoresFindResponse struct { - Keys [][]float32 `json:"keys" yaml:"keys"` - Values []string `json:"values" yaml:"values"` - Similarities []float32 `json:"similarities" yaml:"similarities"` -} - -func findSimilarTexts(inputText string, topk int) (StoresFindResponse, error) { - // Initialize an empty response object - response := StoresFindResponse{} - - // Get the embedding for the input text - embedding, err := getEmbeddings(inputText) - if err != nil { - return response, err - } - - // Construct the StoresFind object - storesFind := StoresFind{ - Store: "tasks_store", // Assuming you have a specific store name - Key: embedding, - Topk: topk, - } - - // Marshal the StoresFind object into JSON - jsonData, err := json.Marshal(storesFind) - if err != nil { - return response, err - } - - // Make the HTTP POST request to the external service's /stores/find endpoint - resp, err := http.Post(localAI+"/stores/find", "application/json", bytes.NewBuffer(jsonData)) - if err != nil { - return response, err - } - defer resp.Body.Close() - - // Check if the request was successful - if resp.StatusCode != http.StatusOK { - return response, fmt.Errorf("request to /stores/find failed with status code: %d", resp.StatusCode) - } - - // Parse the response body to retrieve similar texts and similarities - if err := json.NewDecoder(resp.Body).Decode(&response); err != nil { - return response, err - } - - return response, nil -} - -func (app *App) updateUI() { - // Clear the flex layout - app.flex.Clear() - app.flex.SetDirection(tview.FlexColumn) - app.flex.AddItem(nil, 0, 1, false) - - midCol := tview.NewFlex() - midCol.SetDirection(tview.FlexRow) - midCol.AddItem(nil, 0, 1, false) - - // Create a new table. - app.table.Clear() - app.table.SetBorders(true) - - // Set table headers - app.table.SetCell(0, 0, tview.NewTableCell("Description").SetAlign(tview.AlignLeft).SetExpansion(1).SetAttributes(tcell.AttrBold)) - app.table.SetCell(0, 1, tview.NewTableCell("Similarity").SetAlign(tview.AlignCenter).SetExpansion(0).SetAttributes(tcell.AttrBold)) - - // Add the tasks to the table. - for i, task := range app.tasks { - row := i + 1 - app.table.SetCell(row, 0, tview.NewTableCell(task.Description)) - app.table.SetCell(row, 1, tview.NewTableCell(fmt.Sprintf("%.2f", task.Similarity))) - } - - if app.state == StateInput { - inputField := tview.NewInputField() - inputField. - SetLabel("New Task: "). - SetFieldWidth(0). - SetDoneFunc(func(key tcell.Key) { - if key == tcell.KeyEnter { - task := Task{Description: inputField.GetText()} - app.tasks = append(app.tasks, task) - app.state = StateRoot - err := postTasksToExternalService([]Task{task}) - if err != nil { - panic(err) - } - } - app.updateUI() - }) - midCol.AddItem(inputField, 3, 2, true) - app.app.SetFocus(inputField) - } else if app.state == StateSearch { - searchField := tview.NewInputField() - searchField.SetLabel("Search: "). - SetFieldWidth(0). 
- SetDoneFunc(func(key tcell.Key) { - if key == tcell.KeyEnter { - similar, err := findSimilarTexts(searchField.GetText(), 100) - if err != nil { - panic(err) - } - app.tasks = make([]Task, len(similar.Keys)) - for i, v := range similar.Values { - app.tasks[i] = Task{Description: v, Similarity: similar.Similarities[i]} - } - } - app.updateUI() - }) - midCol.AddItem(searchField, 3, 2, true) - app.app.SetFocus(searchField) - } else { - midCol.AddItem(nil, 3, 1, false) - } - - midCol.AddItem(app.table, 0, 2, true) - - // Add the status bar to the flex layout - statusBar := tview.NewTextView(). - SetText(rootStatus). - SetDynamicColors(true). - SetTextAlign(tview.AlignCenter) - if app.state == StateInput { - statusBar.SetText(inputStatus) - } - midCol.AddItem(statusBar, 1, 1, false) - midCol.AddItem(nil, 0, 1, false) - - app.flex.AddItem(midCol, 0, 10, true) - app.flex.AddItem(nil, 0, 1, false) - - // Set the flex as the root element - app.app.SetRoot(app.flex, true) -} - -func main() { - app := NewApp() - tApp := tview.NewApplication() - flex := tview.NewFlex().SetDirection(tview.FlexRow) - table := tview.NewTable() - - app.app = tApp - app.flex = flex - app.table = table - - app.updateUI() // Initial UI setup - - app.app.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey { - switch app.state { - case StateRoot: - // Handle key events when in the root state - switch event.Key() { - case tcell.KeyRune: - switch event.Rune() { - case ' ': - app.state = StateInput - app.updateUI() - return nil // Event is handled - case '/': - app.state = StateSearch - app.updateUI() - return nil // Event is handled - } - } - - case StateInput: - // Handle key events when in the input state - if event.Key() == tcell.KeyEsc { - // Exit input state without adding a task - app.state = StateRoot - app.updateUI() - return nil // Event is handled - } - - case StateSearch: - // Handle key events when in the search state - if event.Key() == tcell.KeyEsc { - // Exit search state - app.state = StateRoot - app.updateUI() - return nil // Event is handled - } - } - - // Return the event for further processing by tview - return event - }) - - if err := postTasksToExternalService(app.tasks); err != nil { - panic(err) - } - - // Start the application - if err := app.app.Run(); err != nil { - panic(err) - } -} diff --git a/examples/slack-bot/.env.example b/examples/slack-bot/.env.example deleted file mode 100644 index e169e0cf..00000000 --- a/examples/slack-bot/.env.example +++ /dev/null @@ -1,14 +0,0 @@ -# CPU .env docs: https://localai.io/howtos/easy-setup-docker-cpu/ -# GPU .env docs: https://localai.io/howtos/easy-setup-docker-gpu/ - -SLACK_APP_TOKEN=xapp-1-... -SLACK_BOT_TOKEN=xoxb-... -OPENAI_API_KEY=sk-... -OPENAI_API_BASE=http://api:8080 -OPENAI_MODEL=gpt-3.5-turbo -OPENAI_TIMEOUT_SECONDS=60 -#OPENAI_SYSTEM_TEXT="You proofread text. 
When you receive a message, you will check -#for mistakes and make suggestions to improve the language of the given text" -USE_SLACK_LANGUAGE=true -SLACK_APP_LOG_LEVEL=INFO -TRANSLATE_MARKDOWN=true \ No newline at end of file diff --git a/examples/slack-bot/README.md b/examples/slack-bot/README.md deleted file mode 100644 index 23a5c884..00000000 --- a/examples/slack-bot/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# Slack bot - -Slackbot using: https://github.com/seratch/ChatGPT-in-Slack - -## Setup - -```bash -# Clone LocalAI -git clone https://github.com/go-skynet/LocalAI - -cd LocalAI/examples/slack-bot - -git clone https://github.com/seratch/ChatGPT-in-Slack - -# (optional) Checkout a specific LocalAI tag -# git checkout -b build - -# Download gpt4all-j to models/ -wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j - -# Set the Slack bot options (see: https://github.com/seratch/ChatGPT-in-Slack) -cp -rfv .env.example .env -vim .env - -# start with docker-compose -docker-compose up -d --build -``` \ No newline at end of file diff --git a/examples/slack-bot/docker-compose.yaml b/examples/slack-bot/docker-compose.yaml deleted file mode 100644 index 12cb70b0..00000000 --- a/examples/slack-bot/docker-compose.yaml +++ /dev/null @@ -1,23 +0,0 @@ -version: '3.6' - -services: - api: - image: quay.io/go-skynet/local-ai:latest - build: - context: ../../ - dockerfile: Dockerfile - ports: - - 8080:8080 - environment: - - DEBUG=true - - MODELS_PATH=/models - volumes: - - ./models:/models:cached - command: ["/usr/bin/local-ai" ] - - bot: - build: - context: ./ChatGPT-in-Slack - dockerfile: Dockerfile - env_file: - - .env diff --git a/examples/slack-bot/models b/examples/slack-bot/models deleted file mode 120000 index 1e266b1b..00000000 --- a/examples/slack-bot/models +++ /dev/null @@ -1 +0,0 @@ -../models \ No newline at end of file diff --git a/examples/slack-qa-bot/.env.example b/examples/slack-qa-bot/.env.example deleted file mode 100644 index 29c68dde..00000000 --- a/examples/slack-qa-bot/.env.example +++ /dev/null @@ -1,51 +0,0 @@ -# CPU .env docs: https://localai.io/howtos/easy-setup-docker-cpu/ -# GPU .env docs: https://localai.io/howtos/easy-setup-docker-gpu/ - -# Create an app-level token with connections:write scope -SLACK_APP_TOKEN=xapp-1-... -# Install the app into your workspace to grab this token -SLACK_BOT_TOKEN=xoxb-... - -# Set this to a random string; the value doesn't matter, but the python library complains if it is not set -OPENAI_API_KEY=sk-foo-bar-baz - -# Optional: gpt-3.5-turbo and gpt-4 are currently supported (default: gpt-3.5-turbo) -OPENAI_MODEL=gpt-3.5-turbo -# Optional: You can adjust the timeout seconds for OpenAI calls (default: 30) -OPENAI_TIMEOUT_SECONDS=560 - -MEMORY_DIR=/tmp/memory_dir - -OPENAI_API_BASE=http://api:8080/v1 - -EMBEDDINGS_MODEL_NAME=all-MiniLM-L6-v2 - -## Repository and sitemap to index in the vector database on start -SITEMAP="https://kairos.io/sitemap.xml" - -# Optional repository names. -# REPOSITORIES="foo,bar" -# # Define clone URL for "foo" -# foo_CLONE_URL="http://github.com.." -# bar_CLONE_URL="..." -# # Define branch for foo -# foo_BRANCH="master" -# Optional token if you hit scraping issues -# GITHUB_PERSONAL_ACCESS_TOKEN="" -# ISSUE_REPOSITORIES="go-skynet/LocalAI,foo/bar,..."
- -# Optional: When the string is "true", this app translates ChatGPT prompts into a user's preferred language (default: true) -USE_SLACK_LANGUAGE=true -# Optional: Adjust the app's logging level (default: DEBUG) -SLACK_APP_LOG_LEVEL=INFO -# Optional: When the string is "true", translate between OpenAI markdown and Slack mrkdwn format (default: false) -TRANSLATE_MARKDOWN=true - - -### LocalAI - -DEBUG=true -MODELS_PATH=/models -IMAGE_PATH=/tmp -# See: https://github.com/go-skynet/model-gallery -PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}] \ No newline at end of file diff --git a/examples/slack-qa-bot/README.md b/examples/slack-qa-bot/README.md deleted file mode 100644 index 7844d669..00000000 --- a/examples/slack-qa-bot/README.md +++ /dev/null @@ -1,23 +0,0 @@ -## Slack QA Bot - -This example uses https://github.com/spectrocloud-labs/Slack-QA-bot to deploy a Slack bot that can answer questions about your documentation! - -- Create a new Slack app using the manifest-dev.yml file -- Install the app into your Slack workspace -- Retrieve your slack keys and edit `.env` -- Start the app - -```bash -# Clone LocalAI -git clone https://github.com/go-skynet/LocalAI - -cd LocalAI/examples/slack-qa-bot - -cp -rfv .env.example .env - -# Edit .env and add slackbot api keys, or repository settings to scan -vim .env - -# run the bot -docker-compose up -``` diff --git a/examples/slack-qa-bot/deployment.yaml b/examples/slack-qa-bot/deployment.yaml deleted file mode 100644 index 498e35d8..00000000 --- a/examples/slack-qa-bot/deployment.yaml +++ /dev/null @@ -1,97 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: slack-bot ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: knowledgebase - namespace: slack-bot - labels: - app: localai-qabot -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 5Gi ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: localai-qabot - namespace: slack-bot - labels: - app: localai-qabot -spec: - selector: - matchLabels: - app: localai-qabot - replicas: 1 - template: - metadata: - labels: - app: localai-qabot - name: localai-qabot - spec: - containers: - - name: localai-qabot-slack - env: - - name: OPENAI_API_KEY - value: "x" - - name: SLACK_APP_TOKEN - value: "xapp-1-" - - name: SLACK_BOT_TOKEN - value: "xoxb-" - - name: OPENAI_MODEL - value: "gpt-3.5-turbo" - - name: OPENAI_TIMEOUT_SECONDS - value: "400" - - name: OPENAI_SYSTEM_TEXT - value: "" - - name: MEMORY_DIR - value: "/memory" - - name: TRANSLATE_MARKDOWN - value: "true" - - name: OPENAI_API_BASE - value: "http://local-ai.default.svc.cluster.local:8080" - - name: REPOSITORIES - value: "KAIROS,AGENT,SDK,OSBUILDER,PACKAGES,IMMUCORE" - - name: KAIROS_CLONE_URL - value: "https://github.com/kairos-io/kairos" - - name: KAIROS_BRANCH - value: "master" - - name: AGENT_CLONE_URL - value: "https://github.com/kairos-io/kairos-agent" - - name: AGENT_BRANCH - value: "main" - - name: SDK_CLONE_URL - value: "https://github.com/kairos-io/kairos-sdk" - - name: SDK_BRANCH - value: "main" - - name: OSBUILDER_CLONE_URL - value: "https://github.com/kairos-io/osbuilder" - - name: OSBUILDER_BRANCH - value: "master" - - name: PACKAGES_CLONE_URL - value: "https://github.com/kairos-io/packages" - - name: PACKAGES_BRANCH - value: "main" - - name: IMMUCORE_CLONE_URL - value: "https://github.com/kairos-io/immucore" - - name: IMMUCORE_BRANCH - value: "master" - - name: GITHUB_PERSONAL_ACCESS_TOKEN - value: "" - - name: ISSUE_REPOSITORIES - value:
"kairos-io/kairos" - image: quay.io/spectrocloud-labs/slack-qa-local-bot:qa - imagePullPolicy: Always - volumeMounts: - - mountPath: "/memory" - name: knowledgebase - volumes: - - name: knowledgebase - persistentVolumeClaim: - claimName: knowledgebase \ No newline at end of file diff --git a/examples/slack-qa-bot/docker-compose.yml b/examples/slack-qa-bot/docker-compose.yml deleted file mode 100644 index bef32023..00000000 --- a/examples/slack-qa-bot/docker-compose.yml +++ /dev/null @@ -1,30 +0,0 @@ -version: "3" - -services: - api: - image: quay.io/go-skynet/local-ai:latest - # As initially LocalAI will download the models defined in PRELOAD_MODELS - # you might need to tweak the healthcheck values here according to your network connection. - # Here we give a timespan of 20m to download all the required files. - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"] - interval: 1m - timeout: 20m - retries: 20 - ports: - - 8080:8080 - env_file: - - .env - volumes: - - ./models:/models:cached - command: ["/usr/bin/local-ai" ] - - slackbot: - image: quay.io/spectrocloud-labs/slack-qa-local-bot:qa - container_name: slackbot - restart: always - env_file: - - .env - depends_on: - api: - condition: service_healthy diff --git a/examples/streamlit-bot/.gitignore b/examples/streamlit-bot/.gitignore deleted file mode 100644 index 0fb83220..00000000 --- a/examples/streamlit-bot/.gitignore +++ /dev/null @@ -1 +0,0 @@ -installer_files \ No newline at end of file diff --git a/examples/streamlit-bot/LICENSE b/examples/streamlit-bot/LICENSE deleted file mode 100644 index e6200f0f..00000000 --- a/examples/streamlit-bot/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 Manohar Joshi - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/examples/streamlit-bot/Main.py b/examples/streamlit-bot/Main.py
deleted file mode 100644
index 0063289a..00000000
--- a/examples/streamlit-bot/Main.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import streamlit as st
-import time
-import requests
-import json
-
-def ask(prompt):
-    url = 'http://localhost:8080/v1/chat/completions'
-    myobj = {
-        "model": "ggml-gpt4all-j.bin",
-        "messages": [{"role": "user", "content": prompt}],
-        "temperature": 0.9
-    }
-    myheaders = {"Content-Type": "application/json"}
-
-    x = requests.post(url, json=myobj, headers=myheaders)
-
-    print(x.text)
-
-    json_data = json.loads(x.text)
-
-    return json_data["choices"][0]["message"]["content"]
-
-
-def main():
-    # Page setup
-    st.set_page_config(page_title="Ask your LLM")
-    st.header("Ask your Question 💬")
-
-    # Initialize chat history
-    if "messages" not in st.session_state:
-        st.session_state.messages = []
-
-    # Display chat messages from history on app rerun
-    for message in st.session_state.messages:
-        with st.chat_message(message["role"]):
-            st.markdown(message["content"])
-
-    # Scroll to bottom
-    st.markdown(
-        """
-
-        """,
-        unsafe_allow_html=True,
-    )
-
-    # React to user input
-    if prompt := st.chat_input("What is up?"):
-        # Display user message in chat message container
-        st.chat_message("user").markdown(prompt)
-        # Add user message to chat history
-        st.session_state.messages.append({"role": "user", "content": prompt})
-        print(f"User has asked the following question: {prompt}")
-
-        # Process
-        response = ""
-        with st.spinner('Processing...'):
-            response = ask(prompt)
-
-        # Display assistant response in chat message container
-        with st.chat_message("assistant"):
-            st.markdown(response)
-        # Add assistant response to chat history
-        st.session_state.messages.append({"role": "assistant", "content": response})
-
-if __name__ == "__main__":
-    main()
\ No newline at end of file
diff --git a/examples/streamlit-bot/README.md b/examples/streamlit-bot/README.md
deleted file mode 100644
index 6588cbde..00000000
--- a/examples/streamlit-bot/README.md
+++ /dev/null
@@ -1,54 +0,0 @@
-## Streamlit bot
-
-![Screenshot](streamlit-bot.png)
-
-This example deploys a Streamlit bot with LocalAI instead of OpenAI. The instructions are for Windows.
-
-```bash
-# Install & run Git Bash
-
-# Clone LocalAI
-git clone https://github.com/go-skynet/LocalAI.git
-cd LocalAI
-
-# (optional) Check out a specific LocalAI tag
-# git checkout -b build
-
-# Use a template from the examples
-cp -rf prompt-templates/ggml-gpt4all-j.tmpl models/
-
-# (optional) Edit the .env file to set things like context size and threads
-# vim .env
-# Download the model
-curl --progress-bar -C - -o models/ggml-gpt4all-j.bin https://gpt4all.io/models/ggml-gpt4all-j.bin
-
-# Install & run Docker Desktop for Windows
-# https://www.docker.com/products/docker-desktop/
-
-# start with docker-compose
-docker-compose up -d --pull always
-# or you can build the images with:
-# docker-compose up -d --build
-# Now the API is accessible at localhost:8080
-curl http://localhost:8080/v1/models
-# {"object":"list","data":[{"id":"ggml-gpt4all-j","object":"model"}]}
-
-curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
-     "model": "ggml-gpt4all-j",
-     "messages": [{"role": "user", "content": "How are you?"}],
-     "temperature": 0.9
-   }'
-
-# {"model":"ggml-gpt4all-j","choices":[{"message":{"role":"assistant","content":"I'm doing well, thanks. How about you?"}}]}
-
-cd examples/streamlit-bot
-
-install_requirements.bat
-
-# run the bot
-start_windows.bat
-
-# The UI (http://localhost:8501/) will open automatically in the browser.
-
-```
-
diff --git a/examples/streamlit-bot/cmd_windows.bat b/examples/streamlit-bot/cmd_windows.bat
deleted file mode 100644
index 606ff485..00000000
--- a/examples/streamlit-bot/cmd_windows.bat
+++ /dev/null
@@ -1,31 +0,0 @@
-@echo off
-
-cd /D "%~dp0"
-
-set PATH=%PATH%;%SystemRoot%\system32
-
-echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda, which cannot be silently installed under a path with spaces. && goto end
-
-@rem fix failed install when installing to a separate drive
-set TMP=%cd%\installer_files
-set TEMP=%cd%\installer_files
-
-@rem config
-set CONDA_ROOT_PREFIX=%cd%\installer_files\conda
-set INSTALL_ENV_DIR=%cd%\installer_files\env
-
-@rem environment isolation
-set PYTHONNOUSERSITE=1
-set PYTHONPATH=
-set PYTHONHOME=
-set "CUDA_PATH=%INSTALL_ENV_DIR%"
-set "CUDA_HOME=%CUDA_PATH%"
-
-@rem activate installer env
-call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. && goto end )
-
-@rem enter commands
-cmd /k "%*"
-
-:end
-pause
diff --git a/examples/streamlit-bot/install_requirements.bat b/examples/streamlit-bot/install_requirements.bat
deleted file mode 100644
index 534091ee..00000000
--- a/examples/streamlit-bot/install_requirements.bat
+++ /dev/null
@@ -1,81 +0,0 @@
-@echo off
-
-cd /D "%~dp0"
-
-set PATH=%PATH%;%SystemRoot%\system32
-
-echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda, which cannot be silently installed under a path with spaces. && goto end
-
-@rem Check for special characters in installation path
-set "SPCHARMESSAGE="WARNING: Special characters were detected in the installation path!" " This can cause the installation to fail!""
-echo "%CD%"| findstr /R /C:"[!#\$%&()\*+,;<=>?@\[\]\^`{|}~]" >nul && (
-    call :PrintBigMessage %SPCHARMESSAGE%
-)
-set SPCHARMESSAGE=
-
-@rem fix failed install when installing to a separate drive
-set TMP=%cd%\installer_files
-set TEMP=%cd%\installer_files
-
-@rem config
-set INSTALL_DIR=%cd%\installer_files
-set CONDA_ROOT_PREFIX=%cd%\installer_files\conda
-set INSTALL_ENV_DIR=%cd%\installer_files\env
-set MINICONDA_DOWNLOAD_URL=https://repo.anaconda.com/miniconda/Miniconda3-py310_23.3.1-0-Windows-x86_64.exe
-set conda_exists=F
-
-@rem figure out whether git and conda need to be installed
-call "%CONDA_ROOT_PREFIX%\_conda.exe" --version >nul 2>&1
-if "%ERRORLEVEL%" EQU "0" set conda_exists=T
-
-@rem (if necessary) install git and conda into a contained environment
-@rem download conda
-if "%conda_exists%" == "F" (
-    echo Downloading Miniconda from %MINICONDA_DOWNLOAD_URL% to %INSTALL_DIR%\miniconda_installer.exe
-
-    mkdir "%INSTALL_DIR%"
-    call curl -Lk "%MINICONDA_DOWNLOAD_URL%" > "%INSTALL_DIR%\miniconda_installer.exe" || ( echo. && echo Miniconda failed to download. && goto end )
-
-    echo Installing Miniconda to %CONDA_ROOT_PREFIX%
-    start /wait "" "%INSTALL_DIR%\miniconda_installer.exe" /InstallationType=JustMe /NoShortcuts=1 /AddToPath=0 /RegisterPython=0 /NoRegistry=1 /S /D=%CONDA_ROOT_PREFIX%
-
-    @rem test the conda binary
-    echo Miniconda version:
-    call "%CONDA_ROOT_PREFIX%\_conda.exe" --version || ( echo. && echo Miniconda not found. && goto end )
-)
-
-@rem create the installer env
-if not exist "%INSTALL_ENV_DIR%" (
-    echo Packages to install: %PACKAGES_TO_INSTALL%
-    call "%CONDA_ROOT_PREFIX%\_conda.exe" create --no-shortcuts -y -k --prefix "%INSTALL_ENV_DIR%" python=3.10 || ( echo. && echo Conda environment creation failed. && goto end )
-)
-
-@rem check if conda environment was actually created
-if not exist "%INSTALL_ENV_DIR%\python.exe" ( echo. && echo Conda environment is empty. && goto end )
-
-@rem environment isolation
-set PYTHONNOUSERSITE=1
-set PYTHONPATH=
-set PYTHONHOME=
-set "CUDA_PATH=%INSTALL_ENV_DIR%"
-set "CUDA_HOME=%CUDA_PATH%"
-
-@rem activate installer env
-call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. && goto end )
-
-@rem set up the installer env
-call pip install -r requirements.txt
-
-@rem below are functions for the script; the next line skips them during normal execution
-goto end
-
-:PrintBigMessage
-echo. && echo.
-echo *******************************************************************
-for %%M in (%*) do echo * %%~M
-echo *******************************************************************
-echo. && echo.
-exit /b
-
-:end
-pause
\ No newline at end of file
diff --git a/examples/streamlit-bot/requirements.txt b/examples/streamlit-bot/requirements.txt
deleted file mode 100644
index 275060a2..00000000
--- a/examples/streamlit-bot/requirements.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-streamlit==1.39.0
-requests
\ No newline at end of file
diff --git a/examples/streamlit-bot/start_windows.bat b/examples/streamlit-bot/start_windows.bat
deleted file mode 100644
index fd76ab15..00000000
--- a/examples/streamlit-bot/start_windows.bat
+++ /dev/null
@@ -1,81 +0,0 @@
-@echo off
-
-cd /D "%~dp0"
-
-set PATH=%PATH%;%SystemRoot%\system32
-
-echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda, which cannot be silently installed under a path with spaces. && goto end
-
-@rem Check for special characters in installation path
-set "SPCHARMESSAGE="WARNING: Special characters were detected in the installation path!" " This can cause the installation to fail!""
-echo "%CD%"| findstr /R /C:"[!#\$%&()\*+,;<=>?@\[\]\^`{|}~]" >nul && (
-    call :PrintBigMessage %SPCHARMESSAGE%
-)
-set SPCHARMESSAGE=
-
-@rem fix failed install when installing to a separate drive
-set TMP=%cd%\installer_files
-set TEMP=%cd%\installer_files
-
-@rem config
-set INSTALL_DIR=%cd%\installer_files
-set CONDA_ROOT_PREFIX=%cd%\installer_files\conda
-set INSTALL_ENV_DIR=%cd%\installer_files\env
-set MINICONDA_DOWNLOAD_URL=https://repo.anaconda.com/miniconda/Miniconda3-py310_23.3.1-0-Windows-x86_64.exe
-set conda_exists=F
-
-@rem figure out whether git and conda need to be installed
-call "%CONDA_ROOT_PREFIX%\_conda.exe" --version >nul 2>&1
-if "%ERRORLEVEL%" EQU "0" set conda_exists=T
-
-@rem (if necessary) install git and conda into a contained environment
-@rem download conda
-if "%conda_exists%" == "F" (
-    echo Downloading Miniconda from %MINICONDA_DOWNLOAD_URL% to %INSTALL_DIR%\miniconda_installer.exe
-
-    mkdir "%INSTALL_DIR%"
-    call curl -Lk "%MINICONDA_DOWNLOAD_URL%" > "%INSTALL_DIR%\miniconda_installer.exe" || ( echo. && echo Miniconda failed to download. && goto end )
-
-    echo Installing Miniconda to %CONDA_ROOT_PREFIX%
-    start /wait "" "%INSTALL_DIR%\miniconda_installer.exe" /InstallationType=JustMe /NoShortcuts=1 /AddToPath=0 /RegisterPython=0 /NoRegistry=1 /S /D=%CONDA_ROOT_PREFIX%
-
-    @rem test the conda binary
-    echo Miniconda version:
-    call "%CONDA_ROOT_PREFIX%\_conda.exe" --version || ( echo. && echo Miniconda not found. && goto end )
-)
-
-@rem create the installer env
-if not exist "%INSTALL_ENV_DIR%" (
-    echo Packages to install: %PACKAGES_TO_INSTALL%
-    call "%CONDA_ROOT_PREFIX%\_conda.exe" create --no-shortcuts -y -k --prefix "%INSTALL_ENV_DIR%" python=3.10 || ( echo. && echo Conda environment creation failed. && goto end )
-)
-
-@rem check if conda environment was actually created
-if not exist "%INSTALL_ENV_DIR%\python.exe" ( echo. && echo Conda environment is empty. && goto end )
-
-@rem environment isolation
-set PYTHONNOUSERSITE=1
-set PYTHONPATH=
-set PYTHONHOME=
-set "CUDA_PATH=%INSTALL_ENV_DIR%"
-set "CUDA_HOME=%CUDA_PATH%"
-
-@rem activate installer env
-call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. && goto end )
-
-@rem run the app
-streamlit run Main.py
-
-@rem below are functions for the script; the next line skips them during normal execution
-goto end
-
-:PrintBigMessage
-echo. && echo.
-echo *******************************************************************
-for %%M in (%*) do echo * %%~M
-echo *******************************************************************
-echo. && echo.
-exit /b
-
-:end
-pause
\ No newline at end of file
diff --git a/examples/streamlit-bot/streamlit-bot.png b/examples/streamlit-bot/streamlit-bot.png
deleted file mode 100644
index 7b69ba99..00000000
Binary files a/examples/streamlit-bot/streamlit-bot.png and /dev/null differ
diff --git a/examples/telegram-bot/README.md b/examples/telegram-bot/README.md
deleted file mode 100644
index d0ab0dfd..00000000
--- a/examples/telegram-bot/README.md
+++ /dev/null
@@ -1,30 +0,0 @@
-## Telegram bot
-
-![Screenshot from 2023-06-09 00-36-26](https://github.com/go-skynet/LocalAI/assets/2420543/e98b4305-fa2d-41cf-9d2f-1bb2d75ca902)
-
-This example uses a fork of [chatgpt-telegram-bot](https://github.com/karfly/chatgpt_telegram_bot) to deploy a Telegram bot with LocalAI instead of OpenAI.
-
-```bash
-# Clone LocalAI
-git clone https://github.com/go-skynet/LocalAI
-
-cd LocalAI/examples/telegram-bot
-
-git clone https://github.com/mudler/chatgpt_telegram_bot
-
-cp -rf docker-compose.yml chatgpt_telegram_bot
-
-cd chatgpt_telegram_bot
-
-mv config/config.example.yml config/config.yml
-mv config/config.example.env config/config.env
-
-# Edit config/config.yml to set the Telegram bot token
-vim config/config.yml
-
-# run the bot
-docker-compose --env-file config/config.env up --build
-```
-
-Note: on first start, LocalAI is configured to download `gpt4all-j` (served in place of `gpt-3.5-turbo`) and `stablediffusion` for image generation. The download is >6GB; if your network connection is slow, adapt the healthcheck section of the `docker-compose.yml` file accordingly (for instance, replace `20m` with `1h`).
-To configure models manually, comment out the `PRELOAD_MODELS` environment variable in the `docker-compose.yml` file and see, for instance, the `model` directory of the [chatbot-ui-manual example](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui-manual).
\ No newline at end of file
diff --git a/examples/telegram-bot/docker-compose.yml b/examples/telegram-bot/docker-compose.yml
deleted file mode 100644
index 297fae20..00000000
--- a/examples/telegram-bot/docker-compose.yml
+++ /dev/null
@@ -1,38 +0,0 @@
-version: "3"
-
-services:
-  api:
-    image: quay.io/go-skynet/local-ai:latest
-    # As LocalAI will initially download the models defined in PRELOAD_MODELS,
-    # you might need to tweak the healthcheck values here according to your network connection.
-    # Here we give a timespan of 20m to download all the required files.
-    healthcheck:
-      test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
-      interval: 1m
-      timeout: 20m
-      retries: 20
-    ports:
-      - 8080:8080
-    environment:
-      - DEBUG=true
-      - MODELS_PATH=/models
-      - IMAGE_PATH=/tmp
-      # You can preload different models here as well.
-      # See: https://github.com/go-skynet/model-gallery
-      - 'PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}, {"url": "github:go-skynet/model-gallery/stablediffusion.yaml"}, {"url": "github:go-skynet/model-gallery/whisper-base.yaml", "name": "whisper-1"}]'
-    volumes:
-      - ./models:/models:cached
-    command: ["/usr/bin/local-ai"]
-  chatgpt_telegram_bot:
-    container_name: chatgpt_telegram_bot
-    command: python3 bot/bot.py
-    restart: always
-    environment:
-      - OPENAI_API_KEY=sk---anystringhere
-      - OPENAI_API_BASE=http://api:8080/v1
-    build:
-      context: "."
-      dockerfile: Dockerfile
-    depends_on:
-      api:
-        condition: service_healthy
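In the compose file above, `PRELOAD_MODELS` registers `gpt4all-j` under the `gpt-3.5-turbo` name the Telegram bot expects (and `whisper-base` as `whisper-1`). As a quick smoke test of that aliasing, here is a minimal Python sketch to run against the API once it is healthy; the host port and the throwaway API key are assumptions lifted from the compose file, and the response shapes match the examples shown in the READMEs above.

```python
import requests

BASE_URL = "http://localhost:8080/v1"  # assumption: host port published by the compose file

# After preloading finishes, the gpt4all-j model should be listed
# under its alias "gpt-3.5-turbo".
models = requests.get(f"{BASE_URL}/models", timeout=30).json()
print([m["id"] for m in models["data"]])

# Exercise the alias with a chat completion, as the bot itself would.
# LocalAI ignores the API key, but OpenAI-style clients insist on one.
resp = requests.post(
    f"{BASE_URL}/chat/completions",
    headers={"Authorization": "Bearer sk---anystringhere"},
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Say hello"}],
        "temperature": 0.9,
    },
    timeout=300,
)
print(resp.json()["choices"][0]["message"]["content"])
```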