module github.com/go-skynet/LocalAI

go 1.22

toolchain go1.22.2

require (
	github.com/M0Rf30/go-tiny-dream v0.0.0-20231128165230-772a9c0d9aaf
	github.com/Masterminds/sprig/v3 v3.2.3
	github.com/alecthomas/kong v0.9.0
	github.com/census-instrumentation/opencensus-proto v0.4.1
	github.com/charmbracelet/glamour v0.7.0
	github.com/chasefleming/elem-go v0.25.0
	github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50
	github.com/containerd/containerd v1.7.18
	github.com/donomii/go-rwkv.cpp v0.0.0-20240228065144-661e7ae26d44
	github.com/elliotchance/orderedmap/v2 v2.2.0
	github.com/fsnotify/fsnotify v1.7.0
	github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230628193450-85ed71aaec8e
	github.com/go-audio/wav v1.1.0
	github.com/go-skynet/go-bert.cpp v0.0.0-20230716133540-6abe312cded1
	github.com/go-skynet/go-llama.cpp v0.0.0-20231009155254-aeba71ee8428
	github.com/gofiber/fiber/v2 v2.52.4
	github.com/gofiber/swagger v1.0.0
	github.com/gofiber/template/html/v2 v2.1.1
	github.com/google/go-containerregistry v0.19.1
	github.com/google/uuid v1.6.0
	github.com/grpc-ecosystem/grpc-gateway v1.16.0
	github.com/hpcloud/tail v1.0.0
	github.com/imdario/mergo v0.3.16
	github.com/ipfs/go-log v1.0.5
	github.com/jaypipes/ghw v0.12.0
	github.com/joho/godotenv v1.5.1
	github.com/klauspost/cpuid/v2 v2.2.7
	github.com/libp2p/go-libp2p v0.31.0
	github.com/mholt/archiver/v3 v3.5.1
	github.com/microcosm-cc/bluemonday v1.0.26
	github.com/mudler/edgevpn v0.25.3
	github.com/mudler/go-processmanager v0.0.0-20230818213616-f204007f963c
	github.com/mudler/go-stable-diffusion v0.0.0-20230605122230-d89260f598af
	github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20231022042237-c25dc5193530
	github.com/onsi/ginkgo/v2 v2.17.1
	github.com/onsi/gomega v1.33.0
	github.com/ory/dockertest/v3 v3.10.0
	github.com/otiai10/openaigo v1.6.0
	github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5
	github.com/prometheus/client_golang v1.17.0
	github.com/rs/zerolog v1.32.0
	github.com/russross/blackfriday v1.6.0
	github.com/sashabaranov/go-openai v1.20.4
	github.com/schollz/progressbar/v3 v3.13.1
	github.com/shirou/gopsutil/v3 v3.23.9
	github.com/stretchr/testify v1.9.0
	github.com/swaggo/swag v1.16.3
	github.com/thxcode/gguf-parser-go v0.0.6
	github.com/tmc/langchaingo v0.0.0-20231019140956-c636b3da7701
	github.com/valyala/fasthttp v1.51.0
	go.opentelemetry.io/otel v1.24.0
	go.opentelemetry.io/otel/exporters/prometheus v0.42.0
	go.opentelemetry.io/otel/metric v1.24.0
	go.opentelemetry.io/otel/sdk/metric v1.19.0
	google.golang.org/api v0.169.0
	google.golang.org/grpc v1.64.0
	google.golang.org/protobuf v1.34.1
	gopkg.in/yaml.v2 v2.4.0
	gopkg.in/yaml.v3 v3.0.1
	oras.land/oras-go/v2 v2.5.0
)
require (
	cloud.google.com/go/compute v1.25.1 // indirect
	cloud.google.com/go/compute/metadata v0.2.3 // indirect
	github.com/Microsoft/hcsshim v0.11.5 // indirect
	github.com/benbjohnson/clock v1.3.5 // indirect
	github.com/c-robinson/iplib v1.0.8 // indirect
	github.com/containerd/cgroups v1.1.0 // indirect
	github.com/containerd/errdefs v0.1.0 // indirect
	github.com/containerd/log v0.1.0 // indirect
	github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
	github.com/creachadair/otp v0.4.2 // indirect
	github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
	github.com/docker/distribution v2.8.2+incompatible // indirect
	github.com/docker/docker-credential-helpers v0.7.0 // indirect
	github.com/dustin/go-humanize v1.0.1 // indirect
	github.com/elastic/gosigar v0.14.2 // indirect
	github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/flynn/noise v1.0.0 // indirect
	github.com/francoispqt/gojay v1.2.13 // indirect
	github.com/godbus/dbus/v5 v5.1.0 // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/golang/mock v1.6.0 // indirect
	github.com/google/btree v1.1.2 // indirect
	github.com/google/gopacket v1.1.19 // indirect
	github.com/google/s2a-go v0.1.7 // indirect
	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
	github.com/googleapis/gax-go/v2 v2.12.2 // indirect
	github.com/gorilla/websocket v1.5.0 // indirect
	github.com/hashicorp/errwrap v1.1.0 // indirect
	github.com/hashicorp/go-multierror v1.1.1 // indirect
	github.com/hashicorp/golang-lru v0.5.4 // indirect
	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
	github.com/henvic/httpretty v0.1.3 // indirect
	github.com/huin/goupnp v1.2.0 // indirect
	github.com/ipfs/boxo v0.10.0 // indirect
	github.com/ipfs/go-cid v0.4.1 // indirect
	github.com/ipfs/go-datastore v0.6.0 // indirect
	github.com/ipfs/go-log/v2 v2.5.1 // indirect
	github.com/ipld/go-ipld-prime v0.20.0 // indirect
	github.com/jackpal/go-nat-pmp v1.0.2 // indirect
	github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
	github.com/jbenet/goprocess v0.1.4 // indirect
	github.com/koron/go-ssdp v0.0.4 // indirect
	github.com/libp2p/go-buffer-pool v0.1.0 // indirect
	github.com/libp2p/go-cidranger v1.1.0 // indirect
	github.com/libp2p/go-flow-metrics v0.1.0 // indirect
	github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect
	github.com/libp2p/go-libp2p-kad-dht v0.25.2 // indirect
	github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect
	github.com/libp2p/go-libp2p-pubsub v0.9.3 // indirect
	github.com/libp2p/go-libp2p-record v0.2.0 // indirect
	github.com/libp2p/go-libp2p-routing-helpers v0.7.2 // indirect
	github.com/libp2p/go-msgio v0.3.0 // indirect
	github.com/libp2p/go-nat v0.2.0 // indirect
	github.com/libp2p/go-netroute v0.2.1 // indirect
	github.com/libp2p/go-reuseport v0.4.0 // indirect
	github.com/libp2p/go-yamux/v4 v4.0.1 // indirect
	github.com/libp2p/zeroconf/v2 v2.2.0 // indirect
	github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
	github.com/miekg/dns v1.1.59 // indirect
	github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
	github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
	github.com/minio/sha256-simd v1.0.1 // indirect
	github.com/moby/sys/sequential v0.5.0 // indirect
	github.com/mr-tron/base58 v1.2.0 // indirect
	github.com/mudler/water v0.0.0-20221010214108-8c7313014ce0 // indirect
	github.com/multiformats/go-base32 v0.1.0 // indirect
	github.com/multiformats/go-base36 v0.2.0 // indirect
	github.com/multiformats/go-multiaddr v0.12.3 // indirect
	github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
	github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
	github.com/multiformats/go-multibase v0.2.0 // indirect
	github.com/multiformats/go-multicodec v0.9.0 // indirect
	github.com/multiformats/go-multihash v0.2.3 // indirect
	github.com/multiformats/go-multistream v0.4.1 // indirect
	github.com/multiformats/go-varint v0.0.7 // indirect
	github.com/opencontainers/runtime-spec v1.1.0 // indirect
	github.com/opentracing/opentracing-go v1.2.0 // indirect
	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
	github.com/philhofer/fwd v1.1.2 // indirect
	github.com/polydawn/refmt v0.89.0 // indirect
	github.com/quic-go/qpack v0.4.0 // indirect
	github.com/quic-go/qtls-go1-20 v0.3.3 // indirect
	github.com/quic-go/quic-go v0.38.1 // indirect
	github.com/quic-go/webtransport-go v0.5.3 // indirect
	github.com/raulk/go-watchdog v1.3.0 // indirect
	github.com/smallnest/ringbuffer v0.0.0-20240423223918-bab516b2000b // indirect
	github.com/songgao/packets v0.0.0-20160404182456-549a10cd4091 // indirect
	github.com/spaolacci/murmur3 v1.1.0 // indirect
	github.com/tinylib/msgp v1.1.8 // indirect
	github.com/vbatts/tar-split v0.11.3 // indirect
	github.com/vishvananda/netlink v1.2.1-beta.2 // indirect
	github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect
	github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
	go.opencensus.io v0.24.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
	go.uber.org/dig v1.17.0 // indirect
	go.uber.org/fx v1.20.0 // indirect
	go.uber.org/multierr v1.11.0 // indirect
	go.uber.org/zap v1.27.0 // indirect
	golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
	golang.org/x/oauth2 v0.18.0 // indirect
	golang.org/x/sync v0.7.0 // indirect
	golang.org/x/sys v0.20.0 // indirect
	golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224 // indirect
	golang.zx2c4.com/wireguard v0.0.0-20220703234212-c31a7b1ab478 // indirect
	golang.zx2c4.com/wireguard/windows v0.5.3 // indirect
	gonum.org/v1/gonum v0.13.0 // indirect
	google.golang.org/appengine v1.6.8 // indirect
	google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect
	lukechampine.com/blake3 v1.2.1 // indirect
)
require (
|
2024-06-22 06:17:41 +00:00
|
|
|
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
2024-03-29 21:29:33 +00:00
|
|
|
github.com/KyleBanks/depth v1.2.1 // indirect
|
2024-03-21 00:12:20 +00:00
|
|
|
github.com/Masterminds/goutils v1.1.1 // indirect
|
|
|
|
github.com/Masterminds/semver/v3 v3.2.0 // indirect
|
2024-06-22 06:17:41 +00:00
|
|
|
github.com/Microsoft/go-winio v0.6.2 // indirect
|
2024-03-22 20:13:11 +00:00
|
|
|
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
|
2024-05-05 07:10:23 +00:00
|
|
|
github.com/StackExchange/wmi v1.2.1 // indirect
|
2024-04-11 15:41:58 +00:00
|
|
|
github.com/alecthomas/chroma/v2 v2.8.0 // indirect
|
2024-05-13 09:37:52 +00:00
|
|
|
github.com/andybalholm/brotli v1.0.5 // indirect
|
2024-04-11 15:41:58 +00:00
|
|
|
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
|
2024-03-13 20:50:46 +00:00
|
|
|
github.com/aymerick/douceur v0.2.0 // indirect
|
2023-10-17 16:22:53 +00:00
|
|
|
github.com/beorn7/perks v1.0.1 // indirect
|
feat(llama.cpp): Totally decentralized, private, distributed, p2p inference (#2343)
* feat(llama.cpp): Enable decentralized, distributed inference
As https://github.com/mudler/LocalAI/pull/2324 introduced distributed inferencing thanks to
@rgerganov implementation in https://github.com/ggerganov/llama.cpp/pull/6829 in upstream llama.cpp, now
it is possible to distribute the workload to remote llama.cpp gRPC server.
This changeset now uses mudler/edgevpn to establish a secure, distributed network between the nodes using a shared token.
The token is generated automatically when starting the server with the `--p2p` flag, and can be used by starting the workers
with `local-ai worker p2p-llama-cpp-rpc` by passing the token via environment variable (TOKEN) or with args (--token).
As per how mudler/edgevpn works, a network is established between the server and the workers with dht and mdns discovery protocols,
the llama.cpp rpc server is automatically started and exposed to the underlying p2p network so the API server can connect on.
When the HTTP server is started, it will discover the workers in the network and automatically create the port-forwards to the service locally.
Then llama.cpp is configured to use the services.
This feature is behind the "p2p" GO_FLAGS
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* go mod tidy
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* ci: add p2p tag
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* better message
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
---------
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-05-20 17:17:59 +00:00
|
|
|
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
2023-10-17 16:22:53 +00:00
|
|
|
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
2024-06-22 06:17:41 +00:00
|
|
|
github.com/containerd/continuity v0.4.2 // indirect
|
2024-02-18 10:12:02 +00:00
|
|
|
github.com/davecgh/go-spew v1.1.1 // indirect
|
2023-06-25 14:35:59 +00:00
|
|
|
github.com/dlclark/regexp2 v1.8.1 // indirect
|
2024-06-22 06:17:41 +00:00
|
|
|
github.com/docker/cli v24.0.0+incompatible // indirect
|
|
|
|
github.com/docker/docker v24.0.9+incompatible
|
2024-03-22 20:13:11 +00:00
|
|
|
github.com/docker/go-connections v0.4.0 // indirect
|
feat(llama.cpp): Totally decentralized, private, distributed, p2p inference (#2343)
* feat(llama.cpp): Enable decentralized, distributed inference
As https://github.com/mudler/LocalAI/pull/2324 introduced distributed inferencing thanks to
@rgerganov implementation in https://github.com/ggerganov/llama.cpp/pull/6829 in upstream llama.cpp, now
it is possible to distribute the workload to remote llama.cpp gRPC server.
This changeset now uses mudler/edgevpn to establish a secure, distributed network between the nodes using a shared token.
The token is generated automatically when starting the server with the `--p2p` flag, and can be used by starting the workers
with `local-ai worker p2p-llama-cpp-rpc` by passing the token via environment variable (TOKEN) or with args (--token).
As per how mudler/edgevpn works, a network is established between the server and the workers with dht and mdns discovery protocols,
the llama.cpp rpc server is automatically started and exposed to the underlying p2p network so the API server can connect on.
When the HTTP server is started, it will discover the workers in the network and automatically create the port-forwards to the service locally.
Then llama.cpp is configured to use the services.
This feature is behind the "p2p" GO_FLAGS
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* go mod tidy
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* ci: add p2p tag
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* better message
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
---------
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-05-20 17:17:59 +00:00
|
|
|
github.com/docker/go-units v0.5.0 // indirect
|
2023-06-24 06:18:17 +00:00
|
|
|
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
|
2024-05-05 07:10:23 +00:00
|
|
|
github.com/ghodss/yaml v1.0.0 // indirect
|
2024-05-13 09:37:52 +00:00
|
|
|
github.com/go-audio/audio v1.0.0 // indirect
|
|
|
|
github.com/go-audio/riff v1.0.0 // indirect
|
feat(llama.cpp): Totally decentralized, private, distributed, p2p inference (#2343)
* feat(llama.cpp): Enable decentralized, distributed inference
As https://github.com/mudler/LocalAI/pull/2324 introduced distributed inferencing thanks to
@rgerganov implementation in https://github.com/ggerganov/llama.cpp/pull/6829 in upstream llama.cpp, now
it is possible to distribute the workload to remote llama.cpp gRPC server.
This changeset now uses mudler/edgevpn to establish a secure, distributed network between the nodes using a shared token.
The token is generated automatically when starting the server with the `--p2p` flag, and can be used by starting the workers
with `local-ai worker p2p-llama-cpp-rpc` by passing the token via environment variable (TOKEN) or with args (--token).
As per how mudler/edgevpn works, a network is established between the server and the workers with dht and mdns discovery protocols,
the llama.cpp rpc server is automatically started and exposed to the underlying p2p network so the API server can connect on.
When the HTTP server is started, it will discover the workers in the network and automatically create the port-forwards to the service locally.
Then llama.cpp is configured to use the services.
This feature is behind the "p2p" GO_FLAGS
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* go mod tidy
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* ci: add p2p tag
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* better message
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
---------
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-05-20 17:17:59 +00:00
|
|
|
github.com/go-logr/logr v1.4.1 // indirect
|
2023-10-17 16:22:53 +00:00
|
|
|
github.com/go-logr/stdr v1.2.2 // indirect
|
2024-05-13 09:37:52 +00:00
|
|
|
github.com/go-ole/go-ole v1.2.6 // indirect
|
2024-03-29 21:29:33 +00:00
|
|
|
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
|
|
|
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
|
|
|
github.com/go-openapi/spec v0.21.0 // indirect
|
|
|
|
github.com/go-openapi/swag v0.23.0 // indirect
|
2024-05-13 09:37:52 +00:00
|
|
|
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
|
|
|
|
github.com/gofiber/contrib/fiberzerolog v1.0.0
|
2024-03-27 20:10:58 +00:00
|
|
|
github.com/gofiber/template v1.8.3 // indirect
|
|
|
|
github.com/gofiber/utils v1.1.0 // indirect
|
2024-03-22 20:13:11 +00:00
|
|
|
github.com/gogo/protobuf v1.3.2 // indirect
|
2024-06-22 06:17:41 +00:00
|
|
|
github.com/golang/protobuf v1.5.4
|
2023-06-24 06:18:17 +00:00
|
|
|
github.com/golang/snappy v0.0.2 // indirect
|
2024-05-13 09:37:52 +00:00
|
|
|
github.com/google/go-cmp v0.6.0 // indirect
|
feat(llama.cpp): Totally decentralized, private, distributed, p2p inference (#2343)
* feat(llama.cpp): Enable decentralized, distributed inference
As https://github.com/mudler/LocalAI/pull/2324 introduced distributed inferencing thanks to
@rgerganov implementation in https://github.com/ggerganov/llama.cpp/pull/6829 in upstream llama.cpp, now
it is possible to distribute the workload to remote llama.cpp gRPC server.
This changeset now uses mudler/edgevpn to establish a secure, distributed network between the nodes using a shared token.
The token is generated automatically when starting the server with the `--p2p` flag, and can be used by starting the workers
with `local-ai worker p2p-llama-cpp-rpc` by passing the token via environment variable (TOKEN) or with args (--token).
As per how mudler/edgevpn works, a network is established between the server and the workers with dht and mdns discovery protocols,
the llama.cpp rpc server is automatically started and exposed to the underlying p2p network so the API server can connect on.
When the HTTP server is started, it will discover the workers in the network and automatically create the port-forwards to the service locally.
Then llama.cpp is configured to use the services.
This feature is behind the "p2p" GO_FLAGS
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* go mod tidy
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* ci: add p2p tag
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* better message
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
---------
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-05-20 17:17:59 +00:00
|
|
|
github.com/google/pprof v0.0.0-20230821062121-407c9e7a662f // indirect
|
2024-03-22 20:13:11 +00:00
|
|
|
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
2024-04-26 08:34:50 +00:00
|
|
|
github.com/gorilla/css v1.0.1 // indirect
|
2024-03-21 00:12:20 +00:00
|
|
|
github.com/huandu/xstrings v1.3.3 // indirect
|
2024-05-05 07:10:23 +00:00
|
|
|
github.com/jaypipes/pcidb v1.0.0 // indirect
|
2024-03-29 21:29:33 +00:00
|
|
|
github.com/josharian/intern v1.0.0 // indirect
|
2024-05-13 09:37:52 +00:00
|
|
|
github.com/klauspost/compress v1.17.0 // indirect
|
2023-06-24 06:18:17 +00:00
|
|
|
github.com/klauspost/pgzip v1.2.5 // indirect
|
2024-03-13 20:50:46 +00:00
|
|
|
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
|
2024-05-13 09:37:52 +00:00
|
|
|
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
|
2024-03-29 21:29:33 +00:00
|
|
|
github.com/mailru/easyjson v0.7.7 // indirect
|
2024-05-13 09:37:52 +00:00
|
|
|
github.com/mattn/go-colorable v0.1.13 // indirect
|
|
|
|
github.com/mattn/go-isatty v0.0.20 // indirect
|
|
|
|
github.com/mattn/go-runewidth v0.0.15 // indirect
|
2023-10-17 16:22:53 +00:00
|
|
|
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
2023-10-12 08:45:34 +00:00
|
|
|
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
github.com/mudler/go-piper v0.0.0-20230621222733-56b8a81b4760
github.com/muesli/reflow v0.3.0 // indirect
github.com/muesli/termenv v0.15.2 // indirect
github.com/nwaples/rardecode v1.1.0 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/opencontainers/runc v1.1.12 // indirect
github.com/pierrec/lz4/v4 v4.1.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pkoukk/tiktoken-go v0.1.2 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.11.1 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/shopspring/decimal v1.3.1 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/swaggo/files/v2 v2.0.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/ulikunitz/xz v0.5.9 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/tcplisten v1.0.0 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
github.com/yuin/goldmark v1.5.4 // indirect
github.com/yuin/goldmark-emoji v1.0.2 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.opentelemetry.io/otel/sdk v1.22.0 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
golang.org/x/crypto v0.23.0 // indirect
golang.org/x/mod v0.17.0 // indirect
golang.org/x/net v0.25.0 // indirect
golang.org/x/term v0.20.0 // indirect
golang.org/x/text v0.15.0 // indirect
golang.org/x/tools v0.21.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
gopkg.in/fsnotify.v1 v1.4.7 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
howett.net/plist v1.0.0 // indirect
)