fix(makefile): fix go-gpt2 folder and add verification before git clone (#51)

Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
Author: Marc R Kellerman, 2023-04-21 15:29:32 -07:00, committed by GitHub
parent 79791438fe
commit 05f35b182c
5 changed files with 51 additions and 43 deletions

.env

@@ -2,3 +2,4 @@
 # CONTEXT_SIZE=512
 MODELS_PATH=/models
 # DEBUG=true
+# BUILD_TYPE=generic

.gitignore

@@ -1,6 +1,7 @@
 # go-llama build artifacts
 go-llama
 go-gpt4all-j
+go-gpt2

 # LocalAI build binary
 LocalAI

Dockerfile

@@ -1,11 +1,12 @@
 ARG GO_VERSION=1.20
 ARG DEBIAN_VERSION=11
+ARG BUILD_TYPE=
+
 FROM golang:$GO_VERSION as builder
 WORKDIR /build
 RUN apt-get update && apt-get install -y cmake
 COPY . .
-ARG BUILD_TYPE=
-RUN make build${BUILD_TYPE}
+RUN make build

 FROM debian:$DEBIAN_VERSION
 COPY --from=builder /build/local-ai /usr/bin/local-ai
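
BUILD_TYPE is now declared as a top-level ARG and the builder stage runs a plain make build. A hedged sketch of building the image, with an optional override of that argument (the local-ai tag is illustrative, and the override assumes the build stage consumes BUILD_TYPE):

    # default image
    docker build -t local-ai .

    # pass the BUILD_TYPE build argument declared at the top of the Dockerfile
    docker build --build-arg BUILD_TYPE=generic -t local-ai .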

Makefile

@@ -12,6 +12,20 @@ WHITE := $(shell tput -Txterm setaf 7)
 CYAN := $(shell tput -Txterm setaf 6)
 RESET := $(shell tput -Txterm sgr0)

+C_INCLUDE_PATH=$(shell pwd)/go-llama:$(shell pwd)/go-gpt4all-j:$(shell pwd)/go-gpt2
+LIBRARY_PATH=$(shell pwd)/go-llama:$(shell pwd)/go-gpt4all-j:$(shell pwd)/go-gpt2
+
+# Use this if you want to set the default behavior
+ifndef BUILD_TYPE
+BUILD_TYPE:=default
+endif
+
+ifeq ($(BUILD_TYPE), "generic")
+GENERIC_PREFIX:=generic-
+else
+GENERIC_PREFIX:=
+endif
+
 .PHONY: all test build vendor

 all: help
@@ -19,15 +33,18 @@ all: help
 ## Build:

 build: prepare ## Build the project
-	C_INCLUDE_PATH=$(shell pwd)/go-llama.cpp:$(shell pwd)/go-gpt4all-j:$(shell pwd)/go-gpt2.cpp LIBRARY_PATH=$(shell pwd)/go-llama.cpp:$(shell pwd)/go-gpt4all-j:$(shell pwd)/go-gpt2.cpp $(GOCMD) build -o $(BINARY_NAME) ./
+	$(info ${GREEN}I local-ai build info:${RESET})
+	$(info ${GREEN}I BUILD_TYPE: ${YELLOW}$(BUILD_TYPE)${RESET})
+	C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} $(GOCMD) build -o $(BINARY_NAME) ./

-buildgeneric: prepare-generic ## Build the project
-	C_INCLUDE_PATH=$(shell pwd)/go-llama.cpp:$(shell pwd)/go-gpt4all-j:$(shell pwd)/go-gpt2.cpp LIBRARY_PATH=$(shell pwd)/go-llama.cpp:$(shell pwd)/go-gpt4all-j:$(shell pwd)/go-gpt2.cpp $(GOCMD) build -o $(BINARY_NAME) ./
+generic-build: ## Build the project using generic
+	BUILD_TYPE="generic" $(MAKE) build

 ## GPT4ALL-J
 go-gpt4all-j:
-	git clone --recurse-submodules https://github.com/go-skynet/go-gpt4all-j.cpp go-gpt4all-j && cd go-gpt4all-j && git checkout -b build $(GOGPT4ALLJ_VERSION)
+	git clone --recurse-submodules https://github.com/go-skynet/go-gpt4all-j.cpp go-gpt4all-j
+	cd go-gpt4all-j && git checkout -b build $(GOGPT4ALLJ_VERSION)
 	# This is hackish, but needed as both go-llama and go-gpt4allj have their own version of ggml..
 	@find ./go-gpt4all-j -type f -name "*.c" -exec sed -i'' -e 's/ggml_/ggml_gptj_/g' {} +
 	@find ./go-gpt4all-j -type f -name "*.cpp" -exec sed -i'' -e 's/ggml_/ggml_gptj_/g' {} +
 	@find ./go-gpt4all-j -type f -name "*.h" -exec sed -i'' -e 's/ggml_/ggml_gptj_/g' {} +
@@ -38,54 +55,46 @@ go-gpt4all-j:
 	@find ./go-gpt4all-j -type f -name "*.cpp" -exec sed -i'' -e 's/::replace/::json_gptj_replace/g' {} +

 go-gpt4all-j/libgptj.a: go-gpt4all-j
-	$(MAKE) -C go-gpt4all-j libgptj.a
-
-go-gpt4all-j/libgptj.a-generic: go-gpt4all-j
-	$(MAKE) -C go-gpt4all-j generic-libgptj.a
+	$(MAKE) -C go-gpt4all-j $(GENERIC_PREFIX)libgptj.a

 # CEREBRAS GPT
-go-gpt2.cpp:
-	git clone --recurse-submodules https://github.com/go-skynet/go-gpt2.cpp go-gpt2.cpp && cd go-gpt2.cpp && git checkout -b build $(GOGPT2_VERSION)
-	# This is hackish, but needed as both go-llama and go-gpt4allj have their own version of ggml..
-	@find ./go-gpt2.cpp -type f -name "*.c" -exec sed -i'' -e 's/ggml_/ggml_gpt2_/g' {} +
-	@find ./go-gpt2.cpp -type f -name "*.cpp" -exec sed -i'' -e 's/ggml_/ggml_gpt2_/g' {} +
-	@find ./go-gpt2.cpp -type f -name "*.h" -exec sed -i'' -e 's/ggml_/ggml_gpt2_/g' {} +
-	@find ./go-gpt2.cpp -type f -name "*.cpp" -exec sed -i'' -e 's/gpt_/gpt2_/g' {} +
-	@find ./go-gpt2.cpp -type f -name "*.h" -exec sed -i'' -e 's/gpt_/gpt2_/g' {} +
-	@find ./go-gpt2.cpp -type f -name "*.cpp" -exec sed -i'' -e 's/json_/json_gpt2_/g' {} +
+go-gpt2:
+	git clone --recurse-submodules https://github.com/go-skynet/go-gpt2.cpp go-gpt2
+	cd go-gpt2 && git checkout -b build $(GOGPT2_VERSION)
+	# This is hackish, but needed as both go-llama and go-gpt4allj have their own version of ggml..
+	@find ./go-gpt2 -type f -name "*.c" -exec sed -i'' -e 's/ggml_/ggml_gpt2_/g' {} +
+	@find ./go-gpt2 -type f -name "*.cpp" -exec sed -i'' -e 's/ggml_/ggml_gpt2_/g' {} +
+	@find ./go-gpt2 -type f -name "*.h" -exec sed -i'' -e 's/ggml_/ggml_gpt2_/g' {} +
+	@find ./go-gpt2 -type f -name "*.cpp" -exec sed -i'' -e 's/gpt_/gpt2_/g' {} +
+	@find ./go-gpt2 -type f -name "*.h" -exec sed -i'' -e 's/gpt_/gpt2_/g' {} +
+	@find ./go-gpt2 -type f -name "*.cpp" -exec sed -i'' -e 's/json_/json_gpt2_/g' {} +

-go-gpt2.cpp/libgpt2.a: go-gpt2.cpp
-	$(MAKE) -C go-gpt2.cpp libgpt2.a
-
-go-gpt2.cpp/libgpt2.a-generic: go-gpt2.cpp
-	$(MAKE) -C go-gpt2.cpp generic-libgpt2.a
+go-gpt2/libgpt2.a: go-gpt2
+	$(MAKE) -C go-gpt2 $(GENERIC_PREFIX)libgpt2.a

 go-llama:
 	git clone -b $(GOLLAMA_VERSION) --recurse-submodules https://github.com/go-skynet/go-llama.cpp go-llama
-	$(MAKE) -C go-llama libbinding.a
-
-go-llama-generic:
-	git clone -b $(GOLLAMA_VERSION) --recurse-submodules https://github.com/go-skynet/go-llama.cpp go-llama
-	$(MAKE) -C go-llama generic-libbinding.a
+
+go-llama/libbinding.a: go-llama
+	$(MAKE) -C go-llama $(GENERIC_PREFIX)libbinding.a

 replace:
 	$(GOCMD) mod edit -replace github.com/go-skynet/go-llama.cpp=$(shell pwd)/go-llama
 	$(GOCMD) mod edit -replace github.com/go-skynet/go-gpt4all-j.cpp=$(shell pwd)/go-gpt4all-j
-	$(GOCMD) mod edit -replace github.com/go-skynet/go-gpt2.cpp=$(shell pwd)/go-gpt2.cpp
+	$(GOCMD) mod edit -replace github.com/go-skynet/go-gpt2.cpp=$(shell pwd)/go-gpt2

-prepare: go-llama go-gpt4all-j/libgptj.a go-gpt2.cpp/libgpt2.a replace
-prepare-generic: go-llama-generic go-gpt4all-j/libgptj.a-generic go-gpt2.cpp/libgpt2.a-generic replace
+prepare: go-llama/libbinding.a go-gpt4all-j/libgptj.a go-gpt2/libgpt2.a replace

 clean: ## Remove build related file
 	rm -fr ./go-llama
 	rm -rf ./go-gpt4all-j
-	rm -rf ./go-gpt2.cpp
+	rm -rf ./go-gpt2
 	rm -rf $(BINARY_NAME)

 ## Run:
 run: prepare
-	$(GOCMD) run ./ api
+	C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} $(GOCMD) run ./main.go

 ## Test:
 test: ## Run the tests of the project
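
The "verification before git clone" from the commit title comes from the clone targets now being named after the directories they create: if go-llama, go-gpt4all-j or go-gpt2 already exists, make treats the target as up to date and skips the clone, and the libraries are built through the new <repo>/lib*.a targets that honor $(GENERIC_PREFIX). C_INCLUDE_PATH and LIBRARY_PATH are likewise defined once at the top and reused by build and run. A usage sketch of the reworked targets, assuming the commands are run from the repository root:

    # default build: clones the three bindings only if their directories are
    # missing, then builds the libraries and the local-ai binary
    make build

    # CPU-generic build: re-invokes make with BUILD_TYPE="generic", which is
    # meant to select the generic- prefixed library targets via GENERIC_PREFIX
    make generic-build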

docker-compose.yaml

@@ -6,14 +6,10 @@ services:
     build:
       context: .
       dockerfile: Dockerfile
-      # args:
-      #   BUILD_TYPE: generic # Uncomment to build CPU generic code that works on most HW
     ports:
       - 8080:8080
-    environment:
-      - MODELS_PATH=$MODELS_PATH
-      - CONTEXT_SIZE=$CONTEXT_SIZE
-      - THREADS=$THREADS
-      - DEBUG=$DEBUG
+    env_file:
+      - .env
     volumes:
       - ./models:/models:cached
+    command: ["/usr/bin/local-ai" ]
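
The service now loads its settings through env_file instead of forwarding MODELS_PATH, CONTEXT_SIZE, THREADS and DEBUG one by one, and the container command is spelled out explicitly; the values come from the .env file shown in the first hunk. A minimal run sketch, assuming the command is issued from the repository root where .env lives:

    # rebuild the image and start the service with settings taken from .env
    docker compose up --build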