mirror of
https://github.com/mudler/LocalAI.git
synced 2025-02-14 14:41:56 +00:00
Some checks are pending
Explorer deployment / build-linux (push) Waiting to run
GPU tests / ubuntu-latest (1.21.x) (push) Waiting to run
generate and publish GRPC docker caches / generate_caches (ubuntu:22.04, linux/amd64,linux/arm64, ubuntu-latest) (push) Waiting to run
generate and publish intel docker caches / generate_caches (intel/oneapi-basekit:2025.0.0-0-devel-ubuntu22.04, linux/amd64, ubuntu-latest) (push) Waiting to run
build container images / hipblas-jobs (-aio-gpu-hipblas, rocm/dev-ubuntu-22.04:6.1, hipblas, true, ubuntu:22.04, extras, latest-gpu-hipblas, latest-aio-gpu-hipblas, --jobs=3 --output-sync=target, linux/amd64, arc-runner-set, auto, -hipblas-ffmpeg) (push) Waiting to run
build container images / hipblas-jobs (rocm/dev-ubuntu-22.04:6.1, hipblas, false, ubuntu:22.04, core, --jobs=3 --output-sync=target, linux/amd64, arc-runner-set, false, -hipblas-core) (push) Waiting to run
build container images / hipblas-jobs (rocm/dev-ubuntu-22.04:6.1, hipblas, false, ubuntu:22.04, extras, --jobs=3 --output-sync=target, linux/amd64, arc-runner-set, false, -hipblas) (push) Waiting to run
build container images / hipblas-jobs (rocm/dev-ubuntu-22.04:6.1, hipblas, true, ubuntu:22.04, core, --jobs=3 --output-sync=target, linux/amd64, arc-runner-set, false, -hipblas-ffmpeg-core) (push) Waiting to run
build container images / self-hosted-jobs (-aio-gpu-intel-f16, quay.io/go-skynet/intel-oneapi-base:latest, sycl_f16, true, ubuntu:22.04, extras, latest-gpu-intel-f16, latest-aio-gpu-intel-f16, --jobs=3 --output-sync=target, linux/amd64, arc-runner-set, auto, -sycl-f16-ffmpeg) (push) Waiting to run
build container images / self-hosted-jobs (-aio-gpu-intel-f32, quay.io/go-skynet/intel-oneapi-base:latest, sycl_f32, true, ubuntu:22.04, extras, latest-gpu-intel-f32, latest-aio-gpu-intel-f32, --jobs=3 --output-sync=target, linux/amd64, arc-runner-set, auto, -sycl-f32-ffmpeg) (push) Waiting to run
build container images / self-hosted-jobs (-aio-gpu-nvidia-cuda-11, ubuntu:22.04, cublas, 11, 7, true, extras, latest-gpu-nvidia-cuda-11, latest-aio-gpu-nvidia-cuda-11, --jobs=3 --output-sync=target, linux/amd64, arc-runner-set, auto, -cublas-cuda11-ffmpeg) (push) Waiting to run
build container images / self-hosted-jobs (-aio-gpu-nvidia-cuda-12, ubuntu:22.04, cublas, 12, 0, true, extras, latest-gpu-nvidia-cuda-12, latest-aio-gpu-nvidia-cuda-12, --jobs=3 --output-sync=target, linux/amd64, arc-runner-set, auto, -cublas-cuda12-ffmpeg) (push) Waiting to run
build container images / self-hosted-jobs (quay.io/go-skynet/intel-oneapi-base:latest, sycl_f16, false, ubuntu:22.04, core, --jobs=3 --output-sync=target, linux/amd64, arc-runner-set, false, -sycl-f16-core) (push) Waiting to run
build container images / self-hosted-jobs (quay.io/go-skynet/intel-oneapi-base:latest, sycl_f16, true, ubuntu:22.04, core, --jobs=3 --output-sync=target, linux/amd64, arc-runner-set, false, -sycl-f16-ffmpeg-core) (push) Waiting to run
build container images / self-hosted-jobs (quay.io/go-skynet/intel-oneapi-base:latest, sycl_f32, false, ubuntu:22.04, core, --jobs=3 --output-sync=target, linux/amd64, arc-runner-set, false, -sycl-f32-core) (push) Waiting to run
build container images / self-hosted-jobs (quay.io/go-skynet/intel-oneapi-base:latest, sycl_f32, true, ubuntu:22.04, core, --jobs=3 --output-sync=target, linux/amd64, arc-runner-set, false, -sycl-f32-ffmpeg-core) (push) Waiting to run
build container images / self-hosted-jobs (ubuntu:22.04, , , extras, --jobs=3 --output-sync=target, linux/amd64, arc-runner-set, auto, ) (push) Waiting to run
build container images / self-hosted-jobs (ubuntu:22.04, , true, extras, --jobs=3 --output-sync=target, linux/amd64, arc-runner-set, auto, -ffmpeg) (push) Waiting to run
build container images / self-hosted-jobs (ubuntu:22.04, cublas, 11, 7, , extras, --jobs=3 --output-sync=target, linux/amd64, arc-runner-set, false, -cublas-cuda11) (push) Waiting to run
build container images / self-hosted-jobs (ubuntu:22.04, cublas, 12, 0, , extras, --jobs=3 --output-sync=target, linux/amd64, arc-runner-set, false, -cublas-cuda12) (push) Waiting to run
build container images / core-image-build (-aio-cpu, ubuntu:22.04, , true, core, latest-cpu, latest-aio-cpu, --jobs=4 --output-sync=target, linux/amd64,linux/arm64, arc-runner-set, false, auto, -ffmpeg-core) (push) Waiting to run
build container images / core-image-build (ubuntu:22.04, cublas, 11, 7, , core, --jobs=4 --output-sync=target, linux/amd64, arc-runner-set, false, false, -cublas-cuda11-core) (push) Waiting to run
build container images / core-image-build (ubuntu:22.04, cublas, 11, 7, true, core, --jobs=4 --output-sync=target, linux/amd64, arc-runner-set, false, false, -cublas-cuda11-ffmpeg-core) (push) Waiting to run
build container images / core-image-build (ubuntu:22.04, cublas, 12, 0, , core, --jobs=4 --output-sync=target, linux/amd64, arc-runner-set, false, false, -cublas-cuda12-core) (push) Waiting to run
build container images / core-image-build (ubuntu:22.04, cublas, 12, 0, true, core, --jobs=4 --output-sync=target, linux/amd64, arc-runner-set, false, false, -cublas-cuda12-ffmpeg-core) (push) Waiting to run
build container images / core-image-build (ubuntu:22.04, vulkan, true, core, latest-vulkan-ffmpeg-core, --jobs=4 --output-sync=target, linux/amd64, arc-runner-set, false, false, -vulkan-ffmpeg-core) (push) Waiting to run
build container images / gh-runner (nvcr.io/nvidia/l4t-jetpack:r36.4.0, cublas, 12, 0, true, core, latest-nvidia-l4t-arm64-core, --jobs=4 --output-sync=target, linux/arm64, ubuntu-24.04-arm, true, false, -nvidia-l4t-arm64-core) (push) Waiting to run
Security Scan / tests (push) Waiting to run
Tests extras backends / tests-transformers (push) Waiting to run
Tests extras backends / tests-rerankers (push) Waiting to run
Tests extras backends / tests-diffusers (push) Waiting to run
Tests extras backends / tests-coqui (push) Waiting to run
tests / tests-linux (1.21.x) (push) Waiting to run
tests / tests-aio-container (push) Waiting to run
tests / tests-apple (1.21.x) (push) Waiting to run
Bumps [GrantBirki/git-diff-action](https://github.com/grantbirki/git-diff-action) from 2.7.0 to 2.8.0.
- [Release notes](https://github.com/grantbirki/git-diff-action/releases)
- [Commits](https://github.com/grantbirki/git-diff-action/compare/v2.7.0...v2.8.0)

---
updated-dependencies:
- dependency-name: GrantBirki/git-diff-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
169 lines
6.5 KiB
YAML
169 lines
6.5 KiB
YAML
# Workflow: announce newly added AI models on Discord and Twitter.
# Triggered when a pull request is closed; each job only proceeds if the PR
# was actually merged AND carries the 'area/ai-model' label. Each job spins up
# a LocalAI instance, feeds it the PR diff, and has the model draft the
# announcement message.
name: Notifications for new models

on:
  pull_request:
    types:
      - closed

jobs:
  notify-discord:
    # Merged PRs with the 'area/ai-model' label only.
    if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model')) }}
    env:
      MODEL_NAME: hermes-2-theta-llama-3-8b
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0 # needed to checkout all branches for this Action to work
      # Starts a LocalAI container serving the model named below.
      - uses: mudler/localai-github-action@v1
        with:
          model: 'hermes-2-theta-llama-3-8b' # Any from models.localai.io, or from huggingface.com with: "huggingface://<repository>/file"
      # Check the PR diff using the current branch and the base branch of the PR
      - uses: GrantBirki/git-diff-action@v2.8.0
        id: git-diff-action
        with:
          json_diff_file_output: diff.json
          raw_diff_file_output: diff.txt
          file_output_only: "true"
      - name: Summarize
        env:
          DIFF: ${{ steps.git-diff-action.outputs.raw-diff-path }}
        id: summarize
        run: |
          input="$(cat $DIFF)"

          # Define the LocalAI API endpoint
          API_URL="http://localhost:8080/chat/completions"

          # Create a JSON payload using jq to handle special characters
          json_payload=$(jq -n --arg input "$input" '{
            model: "'$MODEL_NAME'",
            messages: [
              {
                role: "system",
                content: "You are LocalAI-bot. Write a discord message to notify everyone about the new model from the git diff. Make it informal. An example can include: the URL of the model, the name, and a brief description of the model if exists. Also add an hint on how to install it in LocalAI and that can be browsed over https://models.localai.io. For example: local-ai run model_name_here"
              },
              {
                role: "user",
                content: $input
              }
            ]
          }')

          # Send the request to LocalAI
          response=$(curl -s -X POST $API_URL \
            -H "Content-Type: application/json" \
            -d "$json_payload")

          # Extract the summary from the response
          summary="$(echo $response | jq -r '.choices[0].message.content')"

          # Print the summary
          # -H "Authorization: Bearer $API_KEY" \
          echo "Summary:"
          echo "$summary"
          echo "payload sent"
          echo "$json_payload"
          {
            echo 'message<<EOF'
            echo "$summary"
            echo EOF
          } >> "$GITHUB_OUTPUT"
          docker logs --tail 10 local-ai
      - name: Discord notification
        env:
          DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK_URL }}
          DISCORD_USERNAME: "LocalAI-Bot"
          DISCORD_AVATAR: "https://avatars.githubusercontent.com/u/139863280?v=4"
        uses: Ilshidur/action-discord@master
        with:
          args: ${{ steps.summarize.outputs.message }}
      # Debug session on failure; detached so it does not block the job.
      - name: Setup tmate session if fails
        if: ${{ failure() }}
        uses: mxschmitt/action-tmate@v3.19
        with:
          detached: true
          connect-timeout-seconds: 180
          limit-access-to-actor: true

  notify-twitter:
    # Merged PRs with the 'area/ai-model' label only.
    if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model')) }}
    env:
      MODEL_NAME: hermes-2-theta-llama-3-8b
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0 # needed to checkout all branches for this Action to work
      - name: Start LocalAI
        run: |
          echo "Starting LocalAI..."
          # FIX: was `docker run -e -ti -d ...` — the dangling `-e` flag
          # consumed `-ti` as an environment-variable definition instead of
          # allocating an interactive TTY.
          docker run -ti -d --name local-ai -p 8080:8080 localai/localai:master-ffmpeg-core run --debug $MODEL_NAME
          until [ "`docker inspect -f {{.State.Health.Status}} local-ai`" == "healthy" ]; do echo "Waiting for container to be ready"; docker logs --tail 10 local-ai; sleep 2; done
      # Check the PR diff using the current branch and the base branch of the PR
      - uses: GrantBirki/git-diff-action@v2.8.0
        id: git-diff-action
        with:
          json_diff_file_output: diff.json
          raw_diff_file_output: diff.txt
          file_output_only: "true"
      - name: Summarize
        env:
          DIFF: ${{ steps.git-diff-action.outputs.raw-diff-path }}
        id: summarize
        run: |
          input="$(cat $DIFF)"

          # Define the LocalAI API endpoint
          API_URL="http://localhost:8080/chat/completions"

          # Create a JSON payload using jq to handle special characters
          json_payload=$(jq -n --arg input "$input" '{
            model: "'$MODEL_NAME'",
            messages: [
              {
                role: "system",
                content: "You are LocalAI-bot. Write a twitter message to notify everyone about the new model from the git diff. Make it informal and really short. An example can include: the name, and a brief description of the model if exists. Also add an hint on how to install it in LocalAI. For example: local-ai run model_name_here"
              },
              {
                role: "user",
                content: $input
              }
            ]
          }')

          # Send the request to LocalAI
          response=$(curl -s -X POST $API_URL \
            -H "Content-Type: application/json" \
            -d "$json_payload")

          # Extract the summary from the response
          summary="$(echo $response | jq -r '.choices[0].message.content')"

          # Print the summary
          # -H "Authorization: Bearer $API_KEY" \
          echo "Summary:"
          echo "$summary"
          echo "payload sent"
          echo "$json_payload"
          {
            echo 'message<<EOF'
            echo "$summary"
            echo EOF
          } >> "$GITHUB_OUTPUT"
          docker logs --tail 10 local-ai
      - uses: Eomm/why-don-t-you-tweet@v2
        with:
          tweet-message: ${{ steps.summarize.outputs.message }}
        env:
          # Get your tokens from https://developer.twitter.com/apps
          TWITTER_CONSUMER_API_KEY: ${{ secrets.TWITTER_APP_KEY }}
          TWITTER_CONSUMER_API_SECRET: ${{ secrets.TWITTER_APP_SECRET }}
          TWITTER_ACCESS_TOKEN: ${{ secrets.TWITTER_ACCESS_TOKEN }}
          TWITTER_ACCESS_TOKEN_SECRET: ${{ secrets.TWITTER_ACCESS_TOKEN_SECRET }}
      # Debug session on failure; detached so it does not block the job.
      - name: Setup tmate session if fails
        if: ${{ failure() }}
        uses: mxschmitt/action-tmate@v3.19
        with:
          detached: true
          connect-timeout-seconds: 180
          limit-access-to-actor: true