diff --git a/.github/workflows/comment-pr.yaml b/.github/workflows/comment-pr.yaml
index b8ff45b0..92eafc51 100644
--- a/.github/workflows/comment-pr.yaml
+++ b/.github/workflows/comment-pr.yaml
@@ -11,11 +11,9 @@ jobs:
       - uses: actions/checkout@v4
         with:
           fetch-depth: 0 # needed to checkout all branches for this Action to work
-      - name: Start LocalAI
-        run: |
-          echo "Starting LocalAI..."
-          docker run -e -ti -d --name local-ai -p 8080:8080 localai/localai:master-ffmpeg-core run --debug $MODEL_NAME
-          until [ "`docker inspect -f {{.State.Health.Status}} local-ai`" == "healthy" ]; do echo "Waiting for container to be ready"; docker logs --tail 10 local-ai; sleep 2; done
+      - uses: mudler/localai-github-action@v1
+        with:
+          model: 'hermes-2-theta-llama-3-8b' # Any from models.localai.io, or from huggingface.com with: "huggingface://<repository>/file"
       # Check the PR diff using the current branch and the base branch of the PR
       - uses: GrantBirki/git-diff-action@v2.7.0
         id: git-diff-action
diff --git a/.github/workflows/notify-models.yaml b/.github/workflows/notify-models.yaml
index 43039b78..d6a7b210 100644
--- a/.github/workflows/notify-models.yaml
+++ b/.github/workflows/notify-models.yaml
@@ -14,12 +14,10 @@ jobs:
       - uses: actions/checkout@v4
         with:
           fetch-depth: 0 # needed to checkout all branches for this Action to work
-      - name: Start LocalAI
-        run: |
-          echo "Starting LocalAI..."
-          docker run -e -ti -d --name local-ai -p 8080:8080 localai/localai:master-ffmpeg-core run --debug $MODEL_NAME
-          until [ "`docker inspect -f {{.State.Health.Status}} local-ai`" == "healthy" ]; do echo "Waiting for container to be ready"; docker logs --tail 10 local-ai; sleep 2; done
-      # Check the PR diff using the current branch and the base branch of the PR
+      - uses: mudler/localai-github-action@v1
+        with:
+          model: 'hermes-2-theta-llama-3-8b' # Any from models.localai.io, or from huggingface.com with: "huggingface://<repository>/file"
+      # Check the PR diff using the current branch and the base branch of the PR
       - uses: GrantBirki/git-diff-action@v2.7.0
         id: git-diff-action
         with:
diff --git a/.github/workflows/notify-releases.yaml b/.github/workflows/notify-releases.yaml
index 0f4379aa..faaaacdb 100644
--- a/.github/workflows/notify-releases.yaml
+++ b/.github/workflows/notify-releases.yaml
@@ -12,11 +12,9 @@ jobs:
       RELEASE_TITLE: ${{ github.event.release.name }}
       RELEASE_TAG_NAME: ${{ github.event.release.tag_name }}
     steps:
-      - name: Start LocalAI
-        run: |
-          echo "Starting LocalAI..."
-          docker run -e -ti -d --name local-ai -p 8080:8080 localai/localai:master-ffmpeg-core run --debug $MODEL_NAME
-          until [ "`docker inspect -f {{.State.Health.Status}} local-ai`" == "healthy" ]; do echo "Waiting for container to be ready"; docker logs --tail 10 local-ai; sleep 2; done
+      - uses: mudler/localai-github-action@v1
+        with:
+          model: 'hermes-2-theta-llama-3-8b' # Any from models.localai.io, or from huggingface.com with: "huggingface://<repository>/file"
       - name: Summarize
         id: summarize
         run: |
diff --git a/README.md b/README.md
index 858759bf..9107fc37 100644
--- a/README.md
+++ b/README.md
@@ -134,6 +134,7 @@ Other:
 - Slack bot https://github.com/mudler/LocalAGI/tree/main/examples/slack
 - Shell-Pilot(Interact with LLM using LocalAI models via pure shell scripts on your Linux or MacOS system) https://github.com/reid41/shell-pilot
 - Telegram bot https://github.com/mudler/LocalAI/tree/master/examples/telegram-bot
+- Github Actions: https://github.com/marketplace/actions/start-localai
 - Examples: https://github.com/mudler/LocalAI/tree/master/examples/