base: statically check for reasonable Tslab block size

By default, Tslab statically checks that 8 slabs incl. overhead fit into
one block. If this is not desired, the template parameter
'MIN_SLABS_PER_BLOCK' can be used to control the minimum number of slabs
per block.

Fixes #3834
Christian Helmuth 2020-07-28 14:00:03 +02:00 committed by Norman Feske
parent ff5a474e74
commit 0046edf761
3 changed files with 11 additions and 10 deletions
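
As a usage sketch of the new parameter (hypothetical: 'Object', the 1 KiB
block size, and the '_backing_store' allocator are invented for illustration;
only Tslab and MIN_SLABS_PER_BLOCK come from this commit), on a 64-bit target
a 256-byte entry does not fit into a 1024-byte block eight times, so the
default is rejected and the caller must lower the minimum explicitly:

  #include <base/tslab.h>

  struct Object { char payload[256]; };  /* hypothetical slab entry */

  /* fails the new static_assert with the default MIN_SLABS_PER_BLOCK = 8:
     8*(256 + overhead_per_entry()) + overhead_per_block()
     = 8*(256 + 9) + 32 = 2152 > 1024 */
  //Genode::Tslab<Object, 1024>    _tslab { &_backing_store };

  /* compiles: 3*(256 + 9) + 32 = 827 <= 1024 */
  Genode::Tslab<Object, 1024, 3> _tslab { &_backing_store };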

repos/base/include/base/slab.h

@@ -109,13 +109,8 @@ class Genode::Slab : public Allocator
 		 */
 		~Slab();
 
-		/**
-		 * Return number of bytes consumed per slab entry
-		 *
-		 * The function takes the slab-internal meta-data needs and the actual
-		 * slab entry into account.
-		 */
-		static size_t entry_costs(size_t slab_size, size_t block_size);
+		static constexpr size_t overhead_per_block() { return 4*sizeof(addr_t); }
+		static constexpr size_t overhead_per_entry() { return sizeof(addr_t) + 1; }
 
 		/**
 		 * Return number of unused slab entries
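
For orientation: on a 64-bit target (assuming an 8-byte addr_t, which the
diff itself does not spell out), the new constants evaluate to

  overhead_per_block() = 4*sizeof(addr_t)   = 4*8   = 32 bytes per block
  overhead_per_entry() = sizeof(addr_t) + 1 = 8 + 1 =  9 bytes per entry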

repos/base/include/base/tslab.h

@@ -16,12 +16,16 @@
 
 #include <base/slab.h>
 
-namespace Genode { template <typename, size_t> struct Tslab; }
+namespace Genode { template <typename, size_t, unsigned> struct Tslab; }
 
 
-template <typename T, Genode::size_t BLOCK_SIZE>
+template <typename T, Genode::size_t BLOCK_SIZE, unsigned MIN_SLABS_PER_BLOCK = 8>
 struct Genode::Tslab : Slab
 {
+	/* check if reasonable amount of entries + overhead fits one block */
+	static_assert(MIN_SLABS_PER_BLOCK*(sizeof(T)+overhead_per_entry())
+	              + overhead_per_block() <= BLOCK_SIZE);
+
 	Tslab(Allocator *backing_store, void *initial_sb = 0)
 	: Slab(sizeof(T), BLOCK_SIZE, initial_sb, backing_store)
 	{ }

repos/base/src/lib/base/slab.cc

@@ -219,12 +219,14 @@ Slab::Slab(size_t slab_size, size_t block_size, void *initial_sb,
 	 */
 	_entries_per_block((_block_size - sizeof(Block) - sizeof(umword_t))
 	                   / (_slab_size + sizeof(Entry) + 1)),
-
 	_initial_sb((Block *)initial_sb),
 	_nested(false),
 	_curr_sb((Block *)initial_sb),
 	_backing_store(backing_store)
 {
+	static_assert(sizeof(Slab::Block) <= overhead_per_block());
+	static_assert(sizeof(Slab::Entry) <= overhead_per_entry());
+
 	/* if no initial slab block was specified, try to get one */
 	if (!_curr_sb && _backing_store)
 		_new_slab_block().with_result(
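
These two asserts tie the public overhead constants to the private meta-data
sizes used in the run-time computation above, so the compile-time check gives
a conservative lower bound on _entries_per_block. For the hypothetical
Tslab<Object, 1024, 3> sketched earlier (64-bit, sizeof(umword_t) == 8):

  _entries_per_block >= (1024 - 32 - 8) / (256 + 9 + 1) = 984 / 266 = 3

since sizeof(Block) <= overhead_per_block() = 32 and
sizeof(Entry) <= overhead_per_entry() = 9.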