#pragma once

#include "ggml.h"

#ifdef __cplusplus
extern "C" {
#endif

typedef struct ggml_backend_buffer_type * ggml_backend_buffer_type_t;
typedef struct ggml_backend_buffer * ggml_backend_buffer_t;
typedef struct ggml_backend * ggml_backend_t;

// Tensor allocator
struct ggml_tallocr {
    ggml_backend_buffer_t buffer;
    void * base;
    size_t alignment;
    size_t offset;
};

GGML_API struct ggml_tallocr ggml_tallocr_new(ggml_backend_buffer_t buffer);
GGML_API void ggml_tallocr_alloc(struct ggml_tallocr * talloc, struct ggml_tensor * tensor);
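// Example usage (a minimal sketch; `buf` is assumed to be a previously created
// backend buffer and `t` a tensor from the same context - the names are illustrative):
//
//   struct ggml_tallocr talloc = ggml_tallocr_new(buf);
//   ggml_tallocr_alloc(&talloc, t); // places t at the next aligned offset within buf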
// Graph allocator
/*
  Example usage:
    ggml_gallocr_t galloc = ggml_gallocr_new(ggml_backend_cpu_buffer_type());

    // optional: create a worst-case graph and reserve the buffers to avoid reallocations
    ggml_gallocr_reserve(galloc, build_graph(max_batch));

    // allocate the graph
    struct ggml_cgraph * graph = build_graph(batch);
    ggml_gallocr_alloc_graph(galloc, graph);

    printf("compute buffer size: %zu bytes\n", ggml_gallocr_get_buffer_size(galloc, 0));

    // evaluate the graph
    ggml_backend_graph_compute(backend, graph);
*/
// special tensor flags for use with the graph allocator:
//   ggml_set_input(): all input tensors are allocated at the beginning of the graph in non-overlapping addresses
//   ggml_set_output(): output tensors are never freed and never overwritten
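//
// A short sketch of how these flags are typically applied before allocation
// (`inp` and `out` are illustrative tensors built by the caller as part of the graph):
//
//   ggml_set_input(inp);   // keep inp at a stable, non-overlapping address
//   ggml_set_output(out);  // never free or overwrite out's memory
//   ggml_gallocr_alloc_graph(galloc, graph);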
typedef struct ggml_gallocr * ggml_gallocr_t;

GGML_API ggml_gallocr_t ggml_gallocr_new(ggml_backend_buffer_type_t buft);
GGML_API ggml_gallocr_t ggml_gallocr_new_n(ggml_backend_buffer_type_t * bufts, int n_bufs);
GGML_API void ggml_gallocr_free(ggml_gallocr_t galloc);
// pre-allocate buffers from a measure graph - does not allocate or modify the graph
// call with a worst-case graph to avoid buffer reallocations
// not strictly required for single buffer usage: ggml_gallocr_alloc_graph will reallocate the buffers automatically if needed
// returns false if the buffer allocation failed
GGML_API bool ggml_gallocr_reserve(ggml_gallocr_t galloc, struct ggml_cgraph * graph);
GGML_API bool ggml_gallocr_reserve_n(
    ggml_gallocr_t galloc,
    struct ggml_cgraph * graph,
    const int * node_buffer_ids,
    const int * leaf_buffer_ids);
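// A sketch of multi-buffer usage (the buffer types, the worst-case graph and the id
// arrays are illustrative and supplied by the caller; each id indexes into the bufts
// array and the arrays must cover every node and leaf of the graph):
//
//   ggml_backend_buffer_type_t bufts[2] = { buft_gpu, buft_cpu };
//   ggml_gallocr_t galloc = ggml_gallocr_new_n(bufts, 2);
//   ggml_gallocr_reserve_n(galloc, worst_case_graph, node_buffer_ids, leaf_buffer_ids);
//   // ... allocate and compute graphs ...
//   ggml_gallocr_free(galloc);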
// automatic reallocation if the topology changes when using a single buffer
// returns false if using multiple buffers and a re-allocation is needed (call ggml_gallocr_reserve_n first to set the node buffers)
GGML_API bool ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, struct ggml_cgraph * graph);

GGML_API size_t ggml_gallocr_get_buffer_size(ggml_gallocr_t galloc, int buffer_id);
// Utils
// Create a buffer and allocate all the tensors in a ggml_context
GGML_API struct ggml_backend_buffer * ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_context * ctx, ggml_backend_buffer_type_t buft);
GGML_API struct ggml_backend_buffer * ggml_backend_alloc_ctx_tensors(struct ggml_context * ctx, ggml_backend_t backend);
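// Example usage (a minimal sketch; the backend handle and the tensor shape are
// placeholders chosen for illustration):
//
//   struct ggml_init_params params = {
//       /*.mem_size   =*/ ggml_tensor_overhead()*8,
//       /*.mem_buffer =*/ NULL,
//       /*.no_alloc   =*/ true, // create tensors without data, allocate them below
//   };
//   struct ggml_context * ctx = ggml_init(params);
//   struct ggml_tensor  * a   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 16);
//   ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);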
#ifdef __cplusplus
}
#endif