mirror of https://github.com/genodelabs/genode.git
Tweak slab allocators to use whole pages
Most slab allocators in core use a sliced heap as backing store. Since sliced-heap allocations are performed at page granularity, it is sensible to dimension the slab blocks to fill whole pages.
commit b8cd56cb90
parent ba0545de07
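To see why the slab-block dimensioning matters, here is a small back-of-the-envelope sketch (not part of the patch) of what page-granular sliced-heap allocations imply for the old 256-word default that the first hunk below replaces. The 4 KiB page size is an assumption of this example, and the sliced heap's own per-block meta data is ignored here.

    /* sketch only: PAGE_SIZE is an assumed value, addr_t a stand-in type */
    #include <cstddef>
    #include <cstdint>

    using addr_t = std::uintptr_t;                             /* stand-in for Genode::addr_t */

    constexpr std::size_t PAGE_SIZE      = 4096;
    constexpr std::size_t OLD_SLAB_BLOCK = 256*sizeof(addr_t); /* old default: 2 KiB on 64 bit */

    /* a sliced heap serves each allocation at page granularity, so even a
       2 KiB slab block occupies a full 4 KiB page of backing store */
    constexpr std::size_t BACKING_STORE_USED =
        ((OLD_SLAB_BLOCK + PAGE_SIZE - 1)/PAGE_SIZE)*PAGE_SIZE;

    constexpr std::size_t WASTED = BACKING_STORE_USED - OLD_SLAB_BLOCK; /* ~2 KiB per block on 64 bit */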
@@ -24,7 +24,14 @@ namespace Genode {
 
 	class Allocator_avl_base;
 
-	template <typename, unsigned SLAB_BLOCK_SIZE = 256*sizeof(addr_t)>
+	/*
+	 * The default slab block size is dimensioned such that slab-block
+	 * allocations make effective use of entire memory pages. To account for
+	 * the common pattern of using a 'Sliced_heap' as backing store for the
+	 * 'Allocator_avl', we remove 8 words from the slab-block size to take the
+	 * meta-data overhead of each sliced-heap block into account.
+	 */
+	template <typename, unsigned SLAB_BLOCK_SIZE = (1024 - 8)*sizeof(addr_t)>
 	class Allocator_avl_tpl;
 
 	/**
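As a quick plausibility check of the new default (again only a sketch, not part of the patch): the 8-word overhead figure is taken from the comment above, while the 4 KiB page size is an assumption.

    #include <cstddef>
    #include <cstdint>

    using addr_t = std::uintptr_t;   /* stand-in for Genode::addr_t */

    constexpr std::size_t PAGE_SIZE        = 4096;                      /* assumed */
    constexpr std::size_t SLAB_BLOCK_SIZE  = (1024 - 8)*sizeof(addr_t); /* new default */
    constexpr std::size_t SLICED_HEAP_META = 8*sizeof(addr_t);          /* per the comment above */

    /* 4064 + 32 = 4096 (one page) on 32 bit, 8128 + 64 = 8192 (two pages) on 64 bit */
    static_assert((SLAB_BLOCK_SIZE + SLICED_HEAP_META) % PAGE_SIZE == 0,
                  "slab block plus sliced-heap meta data fills whole pages");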
@@ -338,6 +345,11 @@ class Genode::Allocator_avl_tpl : public Allocator_avl_base
 
 		~Allocator_avl_tpl() { _revert_allocations_and_ranges(); }
 
+		/**
+		 * Return size of slab blocks used for meta data
+		 */
+		static constexpr size_t slab_block_size() { return SLAB_BLOCK_SIZE; }
+
 		/**
 		 * Assign custom meta data to block at specified address
 		 */
@@ -38,23 +38,6 @@ class Genode::Heap : public Allocator
 	{
 		private:
 
-			enum {
-				MIN_CHUNK_SIZE = 4*1024,  /* in machine words */
-				MAX_CHUNK_SIZE = 256*1024,
-
-				/*
-				 * Meta data includes the Dataspace structure and meta data of
-				 * the AVL allocator.
-				 */
-				META_DATA_SIZE = 1024,  /* in bytes */
-
-				/*
-				 * Allocation sizes >= this value are considered as big
-				 * allocations, which get their own dataspace. In contrast
-				 * to smaller allocations, this memory is released to
-				 * the RAM session when 'free()' is called.
-				 */
-				BIG_ALLOCATION_THRESHOLD = 64*1024  /* in bytes */
-			};
-
 			class Dataspace : public List<Dataspace>::Element
 			{
 				public:
@@ -127,16 +110,7 @@ class Genode::Heap : public Allocator
 			     Region_map  *region_map,
 			     size_t       quota_limit = UNLIMITED,
 			     void        *static_addr = 0,
-			     size_t       static_size = 0)
-			:
-				_alloc(nullptr),
-				_ds_pool(ram_session, region_map),
-				_quota_limit(quota_limit), _quota_used(0),
-				_chunk_size(MIN_CHUNK_SIZE)
-			{
-				if (static_addr)
-					_alloc->add_range((addr_t)static_addr, static_size);
-			}
+			     size_t       static_size = 0);
 
 			~Heap();
 
@@ -20,6 +20,22 @@
 using namespace Genode;
 
 
+namespace {
+
+	enum {
+		MIN_CHUNK_SIZE = 4*1024,  /* in machine words */
+		MAX_CHUNK_SIZE = 256*1024,
+
+		/*
+		 * Allocation sizes >= this value are considered as big
+		 * allocations, which get their own dataspace. In contrast
+		 * to smaller allocations, this memory is released to
+		 * the RAM session when 'free()' is called.
+		 */
+		BIG_ALLOCATION_THRESHOLD = 64*1024  /* in bytes */
+	};
+}
+
 Heap::Dataspace_pool::~Dataspace_pool()
 {
 	/* free all ram_dataspaces */
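The big-allocation policy described in the comment boils down to a simple size check; the following stand-alone predicate (a hypothetical helper, not code from the patch) mirrors that description.

    #include <cstddef>

    constexpr std::size_t BIG_ALLOCATION_THRESHOLD = 64*1024; /* bytes, as above */

    /* allocations at or above the threshold get a dataspace of their own and
       are returned to the RAM session as soon as 'free()' is called, instead
       of being carved out of a shared heap chunk */
    constexpr bool gets_own_dataspace(std::size_t size)
    {
        return size >= BIG_ALLOCATION_THRESHOLD;
    }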
@@ -154,10 +170,10 @@ bool Heap::_unsynchronized_alloc(size_t size, void **out_addr)
 	/*
 	 * Calculate block size of needed backing store. The block must hold the
 	 * requested 'size' and we add some space for meta data
-	 * ('Dataspace' structures, AVL nodes).
+	 * ('Dataspace' structures, AVL-node slab blocks).
 	 * Finally, we align the size to a 4K page.
 	 */
-	dataspace_size = size + META_DATA_SIZE;
+	dataspace_size = size + Allocator_avl::slab_block_size() + sizeof(Heap::Dataspace);
 
 	/*
 	 * '_chunk_size' is a multiple of 4K, so 'dataspace_size' becomes
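The effect of the new right-hand side can be sketched as follows. The 'align_to_page' helper and the fixed 4 KiB page size are assumptions of this illustration; the actual heap code performs the rounding itself, as the comment states.

    #include <cstddef>

    constexpr std::size_t PAGE_SIZE = 4096; /* assumed 4 KiB pages */

    constexpr std::size_t align_to_page(std::size_t size)
    {
        return ((size + PAGE_SIZE - 1)/PAGE_SIZE)*PAGE_SIZE;
    }

    /* hypothetical mirror of the calculation: the requested size, plus one
       slab block for the AVL-node meta data, plus the 'Dataspace' bookkeeping
       structure, rounded up to the next page */
    constexpr std::size_t dataspace_size(std::size_t requested,
                                         std::size_t slab_block_size, /* Allocator_avl::slab_block_size() */
                                         std::size_t ds_struct_size)  /* sizeof(Heap::Dataspace) */
    {
        return align_to_page(requested + slab_block_size + ds_struct_size);
    }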
@@ -234,6 +250,22 @@ void Heap::free(void *addr, size_t size)
 }
 
 
+Heap::Heap(Ram_session *ram_session,
+           Region_map  *region_map,
+           size_t       quota_limit,
+           void        *static_addr,
+           size_t       static_size)
+:
+	_alloc(nullptr),
+	_ds_pool(ram_session, region_map),
+	_quota_limit(quota_limit), _quota_used(0),
+	_chunk_size(MIN_CHUNK_SIZE)
+{
+	if (static_addr)
+		_alloc->add_range((addr_t)static_addr, static_size);
+}
+
+
 Heap::~Heap()
 {
 	/*
@@ -16,6 +16,7 @@
 
 /* Genode includes */
 #include <base/tslab.h>
+#include <base/heap.h>
 
 /* base-internal includes */
 #include <base/internal/page_size.h>
 
@@ -26,8 +27,12 @@ namespace Genode
 
 	/**
 	 * Allocator to manage CPU threads associated with a CPU session
+	 *
+	 * We take the knowledge about the used backing-store allocator (sliced
+	 * heap) into account to make sure that slab blocks fill whole pages.
 	 */
-	typedef Tslab<Cpu_thread_component, get_page_size()> Cpu_thread_allocator;
+	typedef Tslab<Cpu_thread_component, get_page_size() - Sliced_heap::meta_data_size()>
+		Cpu_thread_allocator;
 }
 
 #endif /* _CORE__INCLUDE__CPU_THREAD_ALLOCATOR_H_ */
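Why the typedef subtracts 'Sliced_heap::meta_data_size()' rather than using the plain page size follows from the rounding behaviour of the backing store; the figures below are assumed stand-ins for illustration only.

    #include <cstddef>

    constexpr std::size_t PAGE_SIZE = 4096; /* assumed */
    constexpr std::size_t META      = 64;   /* assumed stand-in for Sliced_heap::meta_data_size() */

    constexpr std::size_t backing_store_for(std::size_t block_size)
    {
        /* each sliced-heap block carries META bytes of header and is then
           rounded up to whole pages */
        return ((block_size + META + PAGE_SIZE - 1)/PAGE_SIZE)*PAGE_SIZE;
    }

    /* a slab block of exactly one page would spill into a second page ... */
    static_assert(backing_store_for(PAGE_SIZE)        == 2*PAGE_SIZE, "spills over");
    /* ... while subtracting the meta-data overhead keeps it to one page */
    static_assert(backing_store_for(PAGE_SIZE - META) == 1*PAGE_SIZE, "fits exactly");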
@@ -16,6 +16,7 @@
 
 /* Genode includes */
 #include <util/list.h>
+#include <base/heap.h>
 #include <base/tslab.h>
 #include <base/rpc_server.h>
 #include <base/allocator_guard.h>
 
@@ -38,7 +39,11 @@ namespace Genode {
 
 		class Invalid_dataspace : public Exception { };
 
-		static constexpr size_t SBS = get_page_size();
+		/*
+		 * Dimension 'Ds_slab' such that slab blocks (including the
+		 * meta-data overhead of the sliced-heap blocks) are page sized.
+		 */
+		static constexpr size_t SBS = get_page_size() - Sliced_heap::meta_data_size();
 
 		using Ds_slab = Synced_allocator<Tslab<Dataspace_component, SBS> >;
 
@@ -24,6 +24,7 @@
 #include <base/synced_allocator.h>
 #include <base/signal.h>
 #include <base/rpc_server.h>
+#include <base/heap.h>
 #include <util/list.h>
 #include <util/fifo.h>
 
@@ -273,7 +274,14 @@ class Genode::Region_map_component : public Rpc_object<Region_map>,
 			void sub_rm(Native_capability cap) { _rm_cap = cap; }
 		};
 
-		Tslab<Rm_region_ref, 1024>    _ref_slab;     /* backing store for
+		/*
+		 * Dimension slab allocator for regions such that backing store is
+		 * allocated at the granularity of pages.
+		 */
+		typedef Tslab<Rm_region_ref, get_page_size() - Sliced_heap::meta_data_size()>
+			Ref_slab;
+
+		Ref_slab                      _ref_slab;     /* backing store for
 		                                                 region list */
 		Allocator_avl_tpl<Rm_region>  _map;          /* region map for attach,
 		                                                 detach, pagefaults */