/*
 * \brief  Slab allocator
 * \author Norman Feske
 * \date   2006-04-18
 */

/*
 * Copyright (C) 2006-2017 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU Affero General Public License version 3.
 */
|
|
|
|
|
|
|
|
#ifndef _INCLUDE__BASE__SLAB_H_
#define _INCLUDE__BASE__SLAB_H_

#include <base/allocator.h>
#include <base/stdint.h>

namespace Genode { class Slab; }


/**
 * Slab allocator
 *
 * Hands out fixed-size entries of '_slab_size' bytes, carved out of
 * slab blocks of '_block_size' bytes each.
 */
class Genode::Slab : public Allocator
{
	private:

		/* slab-internal meta-data types, defined in the implementation */
		struct Block;
		struct Entry;

		size_t const _slab_size;         /* size of one slab entry           */
		size_t const _block_size;        /* size of slab block               */
		size_t const _entries_per_block; /* number of slab entries per block */

		Block *_initial_sb; /* initial (static) slab block */
		bool   _nested;     /* indicator for nested call of alloc */

		size_t _num_blocks  = 0;
		size_t _total_avail = 0;

		/**
		 * Block used for attempting the next allocation
		 */
		Block *_curr_sb = nullptr;

		Allocator *_backing_store;

		/**
		 * Allocate and initialize new slab block
		 */
		Block *_new_slab_block();


		/*****************************
		 ** Methods used by 'Block' **
		 *****************************/

		void _release_backing_store(Block *);

		/**
		 * Insert block into slab block ring
		 *
		 * \noapi
		 */
		void _insert_sb(Block *);

		/**
		 * Release slab block
		 */
		void _free_curr_sb();

		/**
		 * Free slab entry
		 */
		void _free(void *addr);

		/*
		 * Noncopyable
		 */
		Slab(Slab const &);
		Slab &operator = (Slab const &);

	public:

		/**
		 * Constructor
		 *
		 * At construction time, there exists one initial slab
		 * block that is used for the first couple of allocations,
		 * especially for the allocation of the second slab
		 * block.
		 */
		Slab(size_t slab_size, size_t block_size, void *initial_sb,
		     Allocator *backing_store = nullptr);

		/**
		 * Destructor
		 */
		~Slab();

		/**
		 * Return number of bytes consumed per slab entry
		 *
		 * The function takes the slab-internal meta-data needs and the actual
		 * slab entry into account.
		 */
		static size_t entry_costs(size_t slab_size, size_t block_size);

		/**
		 * Return number of unused slab entries
		 */
		size_t avail_entries() const { return _total_avail; }

		/**
		 * Add new slab block as backing store
		 *
		 * The specified 'ptr' has to point to a buffer with the size of one
		 * slab block.
		 */
		void insert_sb(void *ptr);

		/**
		 * Return a used slab element, or nullptr if empty
		 */
		void *any_used_elem();

		/**
		 * Define/request backing-store allocator
		 *
		 * \noapi
		 */
		void backing_store(Allocator *bs) { _backing_store = bs; }

		/**
		 * Request backing-store allocator
		 *
		 * \noapi
		 */
		Allocator *backing_store() { return _backing_store; }


		/*************************
		 ** Allocator interface **
		 *************************/

		/**
		 * Allocate slab entry
		 *
		 * The 'size' parameter is ignored as only slab entries with
		 * preconfigured slab-entry size are allocated.
		 */
		bool alloc(size_t size, void **addr) override;

		void free(void *addr, size_t) override { _free(addr); }

		size_t consumed() const override;

		size_t overhead(size_t) const override { return _block_size/_entries_per_block; }

		bool need_size_for_free() const override { return false; }
};

#endif /* _INCLUDE__BASE__SLAB_H_ */