Clean up base-library structure

This patch moves the base library from src/base to src/lib/base,
flattens the library-internal directory structure, and moves the common
parts of the library-description files to base/lib/mk/base.inc and
base/lib/mk/base-common.inc.

Furthermore, the patch fixes a few cosmetic issues (whitespace and
comments only) that I encountered while browsing the result.

Fixes #1952
Norman Feske
2016-04-29 13:23:19 +02:00
parent 52cc50174f
commit 40a5af42eb
134 changed files with 183 additions and 458 deletions


@@ -0,0 +1,409 @@
/*
* \brief AVL-tree-based memory allocator implementation
* \author Norman Feske
* \date 2006-04-18
*/
/*
* Copyright (C) 2006-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#include <base/allocator_avl.h>
#include <base/printf.h>
using namespace Genode;
/**
* Placement operator - tool for directly calling a constructor
*/
inline void *operator new(size_t, void *at) { return at; }
/**************************
** Block Implementation **
**************************/
Allocator_avl_base::Block *
Allocator_avl_base::Block::find_best_fit(size_t size, unsigned align,
addr_t from, addr_t to)
{
/* find child with lowest max_avail value */
bool side = _child_max_avail(1) < _child_max_avail(0);
/* try to find best fitting block in both subtrees */
for (int i = 0; i < 2; i++, side = !side) {
if (_child_max_avail(side) < size)
continue;
Block *res = child(side) ? child(side)->find_best_fit(size, align, from, to) : 0;
if (res)
return (_fits(size, align, from, to) && size < res->size()) ? this : res;
}
return (_fits(size, align, from, to)) ? this : 0;
}
Allocator_avl_base::Block *
Allocator_avl_base::Block::find_by_address(addr_t find_addr, size_t find_size,
bool check_overlap)
{
/* the following checks do not work for size==0 */
find_size = find_size ? find_size : 1;
/* check for overlapping */
if (check_overlap
&& (find_addr + find_size - 1 >= addr())
&& (addr() + size() - 1 >= find_addr))
return this;
/* check for containment */
if ((find_addr >= addr())
&& (find_addr + find_size - 1 <= addr() + size() - 1))
return this;
/* walk into subtree (right if search addr is higher than current) */
Block *c = child(find_addr >= addr());
/* if such a subtree exists, follow it */
return c ? c->find_by_address(find_addr, find_size, check_overlap) : 0;
}
size_t Allocator_avl_base::Block::avail_in_subtree(void)
{
size_t ret = avail();
/* accumulate subtrees of children */
for (int i = 0; i < 2; i++)
if (child(i))
ret += child(i)->avail_in_subtree();
return ret;
}
void Allocator_avl_base::Block::recompute()
{
_max_avail = max(_child_max_avail(0), _child_max_avail(1));
_max_avail = max(avail(), _max_avail);
}
/**********************************
** Allocator_avl implementation **
**********************************/
Allocator_avl_base::Block *Allocator_avl_base::_alloc_block_metadata()
{
void *b = 0;
if (_md_alloc->alloc(sizeof(Block), &b))
/* call constructor by using the placement new operator */
return new((Block *)b) Block(0, 0, 0);
return 0;
}
bool Allocator_avl_base::_alloc_two_blocks_metadata(Block **dst1, Block **dst2)
{
*dst1 = _alloc_block_metadata();
*dst2 = _alloc_block_metadata();
if (!*dst1 && *dst2) _md_alloc->free(*dst2, sizeof(Block));
if (!*dst2 && *dst1) _md_alloc->free(*dst1, sizeof(Block));
return (*dst1 && *dst2);
}
int Allocator_avl_base::_add_block(Block *block_metadata,
addr_t base, size_t size, bool used)
{
if (!block_metadata)
return -1;
/* call constructor for new block */
new (block_metadata) Block(base, size, used);
/* insert block into avl tree */
_addr_tree.insert(block_metadata);
return 0;
}
void Allocator_avl_base::_destroy_block(Block *b)
{
if (!b) return;
/* remove block from both avl trees */
_addr_tree.remove(b);
_md_alloc->free(b, _md_entry_size);
}
void Allocator_avl_base::_cut_from_block(Block *b, addr_t addr, size_t size,
Block *dst1, Block *dst2)
{
size_t padding = addr > b->addr() ? addr - b->addr() : 0;
size_t remaining = b->size() > (size + padding) ? b->size() - size - padding : 0;
addr_t orig_addr = b->addr();
_destroy_block(b);
/* create free block containing the alignment padding */
if (padding > 0)
_add_block(dst1, orig_addr, padding, Block::FREE);
else
_md_alloc->free(dst1, sizeof(Block));
/* create free block for remaining space of original block */
if (remaining > 0)
_add_block(dst2, addr + size, remaining, Block::FREE);
else
_md_alloc->free(dst2, sizeof(Block));
}
void Allocator_avl_base::_revert_allocations_and_ranges()
{
/* revert all allocations */
size_t dangling_allocations = 0;
for (;; dangling_allocations++) {
addr_t addr = 0;
if (!any_block_addr(&addr))
break;
free((void *)addr);
}
if (dangling_allocations)
PWRN("%zd dangling allocation%s at allocator destruction time",
dangling_allocations, (dangling_allocations > 1) ? "s" : "");
/* remove ranges */
while (Block *block = _addr_tree.first())
remove_range(block->addr(), block->size());
}
int Allocator_avl_base::add_range(addr_t new_addr, size_t new_size)
{
Block *b;
/* sanity check for insane users ;-) */
if (!new_size) return -2;
/* check for conflicts with existing blocks */
if (_find_by_address(new_addr, new_size, true))
return -3;
Block *new_block = _alloc_block_metadata();
if (!new_block) return -4;
/* merge with predecessor */
if (new_addr != 0 && (b = _find_by_address(new_addr - 1)) && !b->used()) {
new_size += b->size();
new_addr = b->addr();
_destroy_block(b);
}
/* merge with successor */
if ((b = _find_by_address(new_addr + new_size)) && !b->used()) {
new_size += b->size();
_destroy_block(b);
}
/* create new block that spans over all merged blocks */
return _add_block(new_block, new_addr, new_size, Block::FREE);
}
int Allocator_avl_base::remove_range(addr_t base, size_t size)
{
/* sanity check for insane users ;-) */
if (!size) return -1;
Block *dst1, *dst2;
if (!_alloc_two_blocks_metadata(&dst1, &dst2))
return -2;
/* FIXME removing ranges from allocators with used blocks is not safe! */
while (1) {
/* find block overlapping the specified range */
Block *b = _addr_tree.first();
b = b ? b->find_by_address(base, size, 1) : 0;
/*
* If there is no overlap with any existing block (b == 0), we
* are done. If, however, the overlapping block is in use, we have a
* problem. In both cases, return.
*/
if (!b || !b->avail()) {
_md_alloc->free(dst1, sizeof(Block));
_md_alloc->free(dst2, sizeof(Block));
return !b ? 0 : -3;
}
/* cut intersecting address range */
addr_t intersect_beg = max(base, b->addr());
size_t intersect_end = min(base + size - 1, b->addr() + b->size() - 1);
_cut_from_block(b, intersect_beg, intersect_end - intersect_beg + 1, dst1, dst2);
if (!_alloc_two_blocks_metadata(&dst1, &dst2))
return -4;
};
}
Range_allocator::Alloc_return
Allocator_avl_base::alloc_aligned(size_t size, void **out_addr, int align,
addr_t from, addr_t to)
{
Block *dst1, *dst2;
if (!_alloc_two_blocks_metadata(&dst1, &dst2))
return Alloc_return(Alloc_return::OUT_OF_METADATA);
/* find best fitting block */
Block *b = _addr_tree.first();
b = b ? b->find_best_fit(size, align, from, to) : 0;
if (!b) {
_md_alloc->free(dst1, sizeof(Block));
_md_alloc->free(dst2, sizeof(Block));
return Alloc_return(Alloc_return::RANGE_CONFLICT);
}
/* calculate address of new (aligned) block */
addr_t new_addr = align_addr(b->addr() < from ? from : b->addr(), align);
/* remove new block from containing block */
_cut_from_block(b, new_addr, size, dst1, dst2);
/* create allocated block */
Block *new_block = _alloc_block_metadata();
if (!new_block) {
_md_alloc->free(new_block, sizeof(Block));
return Alloc_return(Alloc_return::OUT_OF_METADATA);
}
_add_block(new_block, new_addr, size, Block::USED);
*out_addr = reinterpret_cast<void *>(new_addr);
return Alloc_return(Alloc_return::OK);
}
Range_allocator::Alloc_return Allocator_avl_base::alloc_addr(size_t size, addr_t addr)
{
/* sanity check */
if (!_sum_in_range(addr, size))
return Alloc_return(Alloc_return::RANGE_CONFLICT);
Block *dst1, *dst2;
if (!_alloc_two_blocks_metadata(&dst1, &dst2))
return Alloc_return(Alloc_return::OUT_OF_METADATA);
/* find block at specified address */
Block *b = _addr_tree.first();
b = b ? b->find_by_address(addr, size) : 0;
/* skip if there's no block or block is used */
if (!b || b->used()) {
_md_alloc->free(dst1, sizeof(Block));
_md_alloc->free(dst2, sizeof(Block));
return Alloc_return(Alloc_return::RANGE_CONFLICT);
}
/* remove new block from containing block */
_cut_from_block(b, addr, size, dst1, dst2);
/* create allocated block */
Block *new_block = _alloc_block_metadata();
if (!new_block) {
_md_alloc->free(new_block, sizeof(Block));
return Alloc_return(Alloc_return::OUT_OF_METADATA);
}
_add_block(new_block, addr, size, Block::USED);
return Alloc_return(Alloc_return::OK);
}
void Allocator_avl_base::free(void *addr)
{
/* lookup corresponding block */
Block *b = _find_by_address(reinterpret_cast<addr_t>(addr));
if (!b || !(b->used())) return;
addr_t new_addr = b->addr();
size_t new_size = b->size();
if (new_addr != (addr_t)addr)
PERR("%s: given address (0x%p) is not the block start address (0x%lx)",
__PRETTY_FUNCTION__, addr, new_addr);
_destroy_block(b);
add_range(new_addr, new_size);
}
size_t Allocator_avl_base::size_at(void const *addr) const
{
/* lookup corresponding block */
Block *b = _find_by_address(reinterpret_cast<addr_t>(addr));
return (b && b->used()) ? b->size() : 0;
}
Allocator_avl_base::Block *Allocator_avl_base::_find_any_used_block(Block *sub_tree)
{
if (!sub_tree)
return nullptr;
if (sub_tree->used())
return sub_tree;
for (unsigned i = 0; i < 2; i++)
if (Block *block = _find_any_used_block(sub_tree->child(i)))
return block;
return nullptr;
}
bool Allocator_avl_base::any_block_addr(addr_t *out_addr)
{
Block * const b = _find_any_used_block(_addr_tree.first());
*out_addr = b ? b->addr() : 0;
return b != nullptr;
}
size_t Allocator_avl_base::avail() const
{
Block *b = static_cast<Block *>(_addr_tree.first());
return b ? b->avail_in_subtree() : 0;
}
bool Allocator_avl_base::valid_addr(addr_t addr) const
{
Block *b = _find_by_address(addr);
return b ? true : false;
}
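
For orientation, the interface implemented above is typically used through the public 'Allocator_avl' front end. A minimal usage sketch, not part of the patch ('md_alloc' and the address range are illustrative; 'alloc_aligned' is assumed to default its from/to window as in the implementation above):

#include <base/allocator_avl.h>

void example(Genode::Allocator &md_alloc)
{
    Genode::Allocator_avl alloc(&md_alloc);  /* md_alloc backs the Block metadata */
    alloc.add_range(0x1000, 0x10000);        /* hand this address range to the allocator */

    void *out = 0;
    if (!alloc.alloc_aligned(4096, &out, 12).is_error()) {  /* 2^12 = 4K alignment */
        /* 'out' points to a 4K-aligned block within the managed range */
        alloc.free(out);  /* the freed block is merged back into the free range */
    }
}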


@@ -0,0 +1,171 @@
/*
* \brief AVL tree
* \author Norman Feske
* \date 2006-04-12
*/
/*
* Copyright (C) 2006-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#include <util/avl_tree.h>
#include <base/printf.h>
using namespace Genode;
inline void Avl_node_base::_recompute_depth(Policy &policy)
{
unsigned char old_depth = _depth;
_depth = max(_child_depth(LEFT), _child_depth(RIGHT)) + 1;
/* if our own value changes, update parent */
if (_depth != old_depth && _parent)
_parent->_recompute_depth(policy);
/* call recompute hook only for valid tree nodes */
if (_parent)
policy.recompute(this);
}
void Avl_node_base::_adopt(Avl_node_base *node, Side i, Policy &policy)
{
_child[i] = node;
if (node)
node->_parent = this;
_recompute_depth(policy);
}
void Avl_node_base::_rotate_subtree(Avl_node_base *node, Side side, Policy &policy)
{
int i = (node == _child[0]) ? LEFT : RIGHT;
Avl_node_base *node_r = node->_child[!side];
Avl_node_base *node_r_l = node_r->_child[side];
/* simple rotation */
if (node_r->_bias() == !side) {
node->_adopt(node_r_l, !side, policy);
node_r->_adopt(node, side, policy);
_adopt(node_r, i, policy);
}
/* double rotation */
else if (node_r_l) {
Avl_node_base *node_r_l_l = node_r_l->_child[side];
Avl_node_base *node_r_l_r = node_r_l->_child[!side];
node->_adopt(node_r_l_l, !side, policy);
node_r->_adopt(node_r_l_r, side, policy);
node_r_l->_adopt(node, side, policy);
node_r_l->_adopt(node_r, !side, policy);
_adopt(node_r_l, i, policy);
}
}
void Avl_node_base::_rebalance_subtree(Avl_node_base *node, Policy &policy)
{
int v = node->_child_depth(RIGHT) - node->_child_depth(LEFT);
/* return if subtree is in balance */
if (abs(v) < 2) return;
_rotate_subtree(node, (v < 0), policy);
}
void Avl_node_base::insert(Avl_node_base *node, Policy &policy)
{
if (node == this) {
PERR("Inserting element %p twice into avl tree!", node);
return;
}
Side i = LEFT;
/* for non-root nodes, decide for a branch */
if (_parent)
i = policy.higher(this, node);
if (_child[i])
_child[i]->insert(node, policy);
else
_adopt(node, i, policy);
/* the inserted node might have changed the depth of the subtree */
_recompute_depth(policy);
if (_parent)
_parent->_rebalance_subtree(this, policy);
}
void Avl_node_base::remove(Policy &policy)
{
Avl_node_base *lp = 0;
Avl_node_base *l = _child[0];
if (!_parent)
PERR("Error: tried to remove AVL node that is not in an AVL tree");
if (l) {
/* find right-most node in left sub tree (l) */
while (l && l->_child[1])
l = l->_child[1];
/* isolate right-most node in left sub tree */
if (l == _child[0])
_adopt(l->_child[0], LEFT, policy);
else
l->_parent->_adopt(l->_child[0], RIGHT, policy);
/* consistent state */
/* remember for rebalancing */
if (l->_parent != this)
lp = l->_parent;
/* exchange this and l */
for (int i = 0; i < 2; i++)
if (_parent->_child[i] == this)
_parent->_adopt(l, i, policy);
l->_adopt(_child[0], LEFT, policy);
l->_adopt(_child[1], RIGHT, policy);
} else {
/* no left sub tree, attach our right sub tree to our parent */
for (int i = 0; i < 2; i++)
if (_parent->_child[i] == this)
_parent->_adopt(_child[1], i, policy);
}
/* walk the tree towards its root and rebalance sub trees */
while (lp && lp->_parent) {
Avl_node_base *lpp = lp->_parent;
lpp->_rebalance_subtree(lp, policy);
lp = lpp;
}
/* reset node pointers */
_child[LEFT] = _child[RIGHT] = 0;
_parent = 0;
}
Avl_node_base::Avl_node_base() : _parent(0), _depth(1) {
_child[LEFT] = _child[RIGHT] = 0; }
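
For context, this policy-driven base class backs the public 'Avl_node'/'Avl_tree' templates, whose 'higher' hook plays the role of 'policy.higher' above. A minimal sketch of a user-defined node type (illustrative only, assuming the template interface of util/avl_tree.h):

#include <util/avl_tree.h>

struct Id_node : Genode::Avl_node<Id_node>
{
    unsigned const id;

    explicit Id_node(unsigned id) : id(id) { }

    /* branch decision, the counterpart of 'policy.higher' above */
    bool higher(Id_node *other) { return other->id > id; }
};

void example()
{
    static Genode::Avl_tree<Id_node> tree;
    static Id_node a(1), b(2);

    tree.insert(&a);  /* insertion rebalances via the rotations above */
    tree.insert(&b);
    tree.remove(&a);
}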


@@ -0,0 +1,21 @@
/*
* \brief Implementation of the cache operations
* \author Christian Prochaska
* \date 2014-05-13
*/
/*
* Copyright (C) 2014 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#include <cpu/cache.h>
/*
* This function needs to be implemented only for base platforms with ARM
* support right now, so the default implementation does nothing.
*/
void cache_coherent(Genode::addr_t, Genode::size_t) { }


@@ -0,0 +1,520 @@
/*
* \brief Child creation framework
* \author Norman Feske
* \date 2006-07-22
*/
/*
* Copyright (C) 2006-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#include <base/child.h>
using namespace Genode;
/***************
** Utilities **
***************/
namespace {
/**
* Guard for transferring quota donation
*
* This class is used to provide transactional semantics of quota
* transfers. Establishing a new session involves several steps, in
* particular subsequent quota transfers. If one intermediate step
* fails, we need to revert all quota transfers that already took
* place. When instantiated at a local scope, a 'Transfer' object guards
* a quota transfer. If the scope is left without a prior explicit
* acknowledgement of the transfer (for example, via an exception), the
* destructor of the 'Transfer' object reverts the transfer in flight.
*/
class Transfer {
bool _ack;
size_t _quantum;
Ram_session_capability _from;
Ram_session_capability _to;
public:
/**
* Constructor
*
* \param quantum number of bytes to transfer
* \param from donator RAM session
* \param to receiver RAM session
*/
Transfer(size_t quantum,
Ram_session_capability from,
Ram_session_capability to)
: _ack(false), _quantum(quantum), _from(from), _to(to)
{
if (_from.valid() && _to.valid() &&
Ram_session_client(_from).transfer_quota(_to, quantum)) {
PWRN("not enough quota for a donation of %zu bytes", quantum);
throw Parent::Quota_exceeded();
}
}
/**
* Destructor
*
* The destructor will be called when leaving the scope of the
* 'session' function. If the scope is left because of an error
* (e.g., an exception), the donation will be reverted.
*/
~Transfer()
{
if (!_ack && _from.valid() && _to.valid())
Ram_session_client(_to).transfer_quota(_from, _quantum);
}
/**
* Acknowledge quota donation
*/
void acknowledge() { _ack = true; }
};
}
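
The guard is used in pairs by 'Child::session' and 'Child::upgrade' below; the pattern in isolation looks like this (a sketch with hypothetical quota value and capability names, not part of the patch):

void transfer_pattern(Genode::size_t quota,
                      Ram_session_capability child_ram,
                      Ram_session_capability ref_ram,
                      Ram_session_capability server_ram)
{
    Transfer donation_from_child(quota, child_ram, ref_ram);   /* may throw */
    Transfer donation_to_service(quota, ref_ram, server_ram);  /* may throw */

    /* ... intermediate steps; an exception here reverts both transfers ... */

    donation_from_child.acknowledge();  /* commit, destructors become no-ops */
    donation_to_service.acknowledge();
}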
/********************
** Child::Session **
********************/
class Child::Session : public Object_pool<Session>::Entry,
public List<Session>::Element
{
private:
enum { IDENT_LEN = 16 };
/**
* Session capability at the server
*/
Session_capability _cap;
/**
* Service interface that was used to create the session
*/
Service *_service;
/**
* Server implementing the session
*
* Even though we can normally determine the server of the session via
* '_service->server()', this does not apply when destructing a server.
* During destruction, we use the 'Server' pointer as opaque key for
* revoking active sessions of the server. So we keep a copy
* independent of the 'Service' object.
*/
Server *_server;
/**
* Total of quota associated with this session
*/
size_t _donated_ram_quota;
/**
* Name of session, used for debugging
*/
char _ident[IDENT_LEN];
public:
/**
* Constructor
*
* \param session session capability
* \param service service that implements the session
* \param ram_quota initial quota donation associated with
* the session
* \param ident optional session identifier, used for
* debugging
*/
Session(Session_capability session, Service *service,
size_t ram_quota, const char *ident = "<noname>")
:
Object_pool<Session>::Entry(session), _cap(session),
_service(service), _server(service->server()),
_donated_ram_quota(ram_quota) {
strncpy(_ident, ident, sizeof(_ident)); }
/**
* Default constructor creates invalid session
*/
Session() : _service(0), _donated_ram_quota(0) { }
/**
* Extend amount of ram attached to the session
*/
void upgrade_ram_quota(size_t ram_quota) {
_donated_ram_quota += ram_quota; }
/**
* Accessors
*/
Session_capability cap() const { return _cap; }
size_t donated_ram_quota() const { return _donated_ram_quota; }
bool valid() const { return _service != 0; }
Service *service() const { return _service; }
Server *server() const { return _server; }
const char *ident() const { return _ident; }
};
/***********
** Child **
***********/
void Child::_add_session(Child::Session const &s)
{
Lock::Guard lock_guard(_lock);
/*
* Store session information in a new child's meta data structure. The
* allocation from 'heap()' may throw a 'Ram_session::Quota_exceeded'
* exception.
*/
Session *session = 0;
try {
session = new (heap())
Session(s.cap(), s.service(),
s.donated_ram_quota(), s.ident()); }
catch (Allocator::Out_of_memory) {
throw Parent::Quota_exceeded(); }
/* these functions may also throw 'Ram_session::Quota_exceeded' */
_session_pool.insert(session);
_session_list.insert(session);
}
void Child::_remove_session(Child::Session *s)
{
/* forget about this session */
_session_list.remove(s);
/* return session quota to the ram session of the child */
if (_policy.ref_ram_session()->transfer_quota(_ram, s->donated_ram_quota()))
PERR("We ran out of our own quota");
destroy(heap(), s);
}
Service &Child::_parent_service()
{
static Parent_service parent_service("");
return parent_service;
}
void Child::_close(Session* s)
{
if (!s) {
PWRN("no session structure found");
return;
}
/*
* There is a chance that the server is not responding to the 'close' call,
* making us block infinitely. However, by using core's cancel-blocking
* mechanism, we can cancel the 'close' call by another (watchdog) thread
* that invokes 'cancel_blocking' at our thread after a timeout. The
* unblocking is reflected at the API level as a 'Blocking_canceled'
* exception. We catch this exception to proceed with normal operation
* after being unblocked.
*/
try { s->service()->close(s->cap()); }
catch (Blocking_canceled) {
PDBG("Got Blocking_canceled exception during %s->close call\n",
s->ident()); }
/*
* If the session was provided by a child of us,
* 'server()->ram_session_cap()' returns the RAM session of the
* corresponding child. Since the session to the server is closed now, we
* expect that the server released all donated resources and we can
* decrease the server's quota.
*
* If this goes wrong, the server is misbehaving.
*/
if (s->service()->ram_session_cap().valid()) {
Ram_session_client server_ram(s->service()->ram_session_cap());
if (server_ram.transfer_quota(_policy.ref_ram_cap(),
s->donated_ram_quota())) {
PERR("Misbehaving server '%s'!", s->service()->name());
}
}
{
Lock::Guard lock_guard(_lock);
_remove_session(s);
}
}
void Child::revoke_server(Server const *server)
{
Lock::Guard lock_guard(_lock);
while (1) {
/* search session belonging to the specified server */
Session *s = _session_list.first();
for ( ; s && (s->server() != server); s = s->next());
/* if no matching session exists, we are done */
if (!s) return;
_session_pool.apply(s->cap(), [&] (Session *s) {
if (s) _session_pool.remove(s); });
_remove_session(s);
}
}
void Child::yield(Resource_args const &args)
{
Lock::Guard guard(_yield_request_lock);
/* buffer yield request arguments to be picked up by the child */
_yield_request_args = args;
/* notify the child about the yield request */
if (_yield_sigh.valid())
Signal_transmitter(_yield_sigh).submit();
}
void Child::notify_resource_avail() const
{
if (_resource_avail_sigh.valid())
Signal_transmitter(_resource_avail_sigh).submit();
}
void Child::announce(Parent::Service_name const &name, Root_capability root)
{
if (!name.is_valid_string()) return;
_policy.announce_service(name.string(), root, heap(), &_server);
}
Session_capability Child::session(Parent::Service_name const &name,
Parent::Session_args const &args,
Affinity const &affinity)
{
if (!name.is_valid_string() || !args.is_valid_string()) throw Unavailable();
/* return sessions that we created for the child */
if (!strcmp("Env::ram_session", name.string())) return _ram;
if (!strcmp("Env::cpu_session", name.string())) return _cpu;
if (!strcmp("Env::pd_session", name.string())) return _pd;
/* filter session arguments according to the child policy */
strncpy(_args, args.string(), sizeof(_args));
_policy.filter_session_args(name.string(), _args, sizeof(_args));
/* filter session affinity */
Affinity const filtered_affinity = _policy.filter_session_affinity(affinity);
/* transfer the quota donation from the child's account to ourself */
size_t ram_quota = Arg_string::find_arg(_args, "ram_quota").ulong_value(0);
Transfer donation_from_child(ram_quota, _ram, _policy.ref_ram_cap());
Service *service = _policy.resolve_session_request(name.string(), _args);
/* raise an error if no matching service provider could be found */
if (!service)
throw Service_denied();
/* transfer session quota from ourself to the service provider */
Transfer donation_to_service(ram_quota, _policy.ref_ram_cap(),
service->ram_session_cap());
/* create session */
Session_capability cap;
try { cap = service->session(_args, filtered_affinity); }
catch (Service::Invalid_args) { throw Service_denied(); }
catch (Service::Unavailable) { throw Service_denied(); }
catch (Service::Quota_exceeded) { throw Quota_exceeded(); }
/* register session */
try { _add_session(Session(cap, service, ram_quota, name.string())); }
catch (Ram_session::Quota_exceeded) { throw Quota_exceeded(); }
/* finish transaction */
donation_from_child.acknowledge();
donation_to_service.acknowledge();
return cap;
}
void Child::upgrade(Session_capability to_session, Parent::Upgrade_args const &args)
{
Service *targeted_service = 0;
/* check if upgrade refers to an Env:: resource */
if (to_session.local_name() == _ram.local_name())
targeted_service = &_ram_service;
if (to_session.local_name() == _cpu.local_name())
targeted_service = &_cpu_service;
if (to_session.local_name() == _pd.local_name())
targeted_service = &_pd_service;
/* check if upgrade refers to server */
_session_pool.apply(to_session, [&] (Session *session)
{
if (session)
targeted_service = session->service();
if (!targeted_service) {
PWRN("could not lookup service for session upgrade");
return;
}
if (!args.is_valid_string()) {
PWRN("no valid session-upgrade arguments");
return;
}
size_t const ram_quota =
Arg_string::find_arg(args.string(), "ram_quota").ulong_value(0);
/* transfer quota from client to ourself */
Transfer donation_from_child(ram_quota, _ram, _policy.ref_ram_cap());
/* transfer session quota from ourself to the service provider */
Transfer donation_to_service(ram_quota, _policy.ref_ram_cap(),
targeted_service->ram_session_cap());
try { targeted_service->upgrade(to_session, args.string()); }
catch (Service::Quota_exceeded) { throw Quota_exceeded(); }
/* remember new amount attached to the session */
if (session)
session->upgrade_ram_quota(ram_quota);
/* finish transaction */
donation_from_child.acknowledge();
donation_to_service.acknowledge();
});
}
void Child::close(Session_capability session_cap)
{
/* refuse to close the child's initial sessions */
if (session_cap.local_name() == _ram.local_name()
|| session_cap.local_name() == _cpu.local_name()
|| session_cap.local_name() == _pd.local_name())
return;
Session *session = nullptr;
_session_pool.apply(session_cap, [&] (Session *s)
{
session = s;
if (s) _session_pool.remove(s);
});
_close(session);
}
void Child::exit(int exit_value)
{
/*
* This function receives the hint from the child that now is a good time
* to kill it. An inherited child class could use this hint to schedule the
* destruction of the child object.
*
* Note that the child object must not be destructed by this function
* because it is executed by the thread contained in the child object.
*/
return _policy.exit(exit_value);
}
Thread_capability Child::main_thread_cap() const
{
return _process.initial_thread.cap;
}
void Child::resource_avail_sigh(Signal_context_capability sigh)
{
_resource_avail_sigh = sigh;
}
void Child::resource_request(Resource_args const &args)
{
_policy.resource_request(args);
}
void Child::yield_sigh(Signal_context_capability sigh) { _yield_sigh = sigh; }
Parent::Resource_args Child::yield_request()
{
Lock::Guard guard(_yield_request_lock);
return _yield_request_args;
}
void Child::yield_response() { _policy.yield_response(); }
Child::Child(Dataspace_capability elf_ds,
Dataspace_capability ldso_ds,
Pd_session_capability pd_cap,
Pd_session &pd,
Ram_session_capability ram_cap,
Ram_session &ram,
Cpu_session_capability cpu_cap,
Cpu_session &cpu,
Region_map &local_rm,
Region_map &remote_rm,
Rpc_entrypoint &entrypoint,
Child_policy &policy,
Service &pd_service,
Service &ram_service,
Service &cpu_service)
try :
_pd(pd_cap), _ram(ram_cap), _cpu(cpu_cap),
_pd_service(pd_service),
_ram_service(ram_service),
_cpu_service(cpu_service),
_heap(&ram, &local_rm),
_entrypoint(entrypoint),
_parent_cap(_entrypoint.manage(this)),
_policy(policy),
_server(_ram),
_process(elf_ds, ldso_ds, pd_cap, pd, ram, cpu, local_rm, remote_rm,
_parent_cap, policy.name())
{ }
catch (Cpu_session::Thread_creation_failed) { throw Process_startup_failed(); }
catch (Cpu_session::Out_of_metadata) { throw Process_startup_failed(); }
catch (Process::Missing_dynamic_linker) { throw Process_startup_failed(); }
catch (Process::Invalid_executable) { throw Process_startup_failed(); }
catch (Region_map::Attach_failed) { throw Process_startup_failed(); }
Child::~Child()
{
_entrypoint.dissolve(this);
_policy.unregister_services();
_session_pool.remove_all([&] (Session *s) { _close(s); });
}


@@ -0,0 +1,213 @@
/*
* \brief Process creation
* \author Norman Feske
* \author Christian Helmuth
* \date 2006-07-18
*/
/*
* Copyright (C) 2006-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* Genode includes */
#include <base/printf.h>
#include <base/child.h>
/* base-internal includes */
#include <base/internal/elf.h>
using namespace Genode;
Child::Process::Loaded_executable::Loaded_executable(Dataspace_capability elf_ds,
Dataspace_capability ldso_ds,
Ram_session &ram,
Region_map &local_rm,
Region_map &remote_rm,
Parent_capability parent_cap)
{
/* skip loading when called during fork */
if (!elf_ds.valid())
return;
/* attach ELF locally */
addr_t elf_addr;
try { elf_addr = local_rm.attach(elf_ds); }
catch (Region_map::Attach_failed) {
PERR("local attach of ELF executable failed"); throw; }
/* setup ELF object and read program entry pointer */
Elf_binary elf(elf_addr);
if (!elf.valid())
throw Invalid_executable();
/*
* If the specified executable is a dynamically linked program, we load
* the dynamic linker instead.
*/
if (elf.is_dynamically_linked()) {
local_rm.detach(elf_addr);
if (!ldso_ds.valid()) {
PERR("attempt to start dynamic executable without dynamic linker");
throw Missing_dynamic_linker();
}
try { elf_addr = local_rm.attach(ldso_ds); }
catch (Region_map::Attach_failed) {
PERR("local attach of dynamic linker failed"); throw; }
elf_ds = ldso_ds;
elf = Elf_binary(elf_addr);
}
entry = elf.entry();
/* setup region map for the new pd */
Elf_segment seg;
for (unsigned n = 0; (seg = elf.get_segment(n)).valid(); ++n) {
if (seg.flags().skip) continue;
/* same values for r/o and r/w segments */
addr_t const addr = (addr_t)seg.start();
size_t const size = seg.mem_size();
bool parent_info = false;
bool const write = seg.flags().w;
bool const exec = seg.flags().x;
if (write) {
/* read-write segment */
/*
* Note that a failure to allocate a RAM dataspace after other
* segments were successfully allocated will not revert the
* previous allocations. The successful allocations will leak.
* In practice, this is not a problem as each component has its
* distinct RAM session. When the process creation failed, the
* entire RAM session will be destroyed and the memory will be
* regained.
*/
/* alloc dataspace */
Dataspace_capability ds_cap;
try { ds_cap = ram.alloc(size); }
catch (Ram_session::Alloc_failed) {
PERR("allocation of read-write segment failed"); throw; };
/* attach dataspace */
void *base;
try { base = local_rm.attach(ds_cap); }
catch (Region_map::Attach_failed) {
PERR("local attach of segment dataspace failed"); throw; }
void * const ptr = base;
addr_t const laddr = elf_addr + seg.file_offset();
/* copy contents and fill with zeros */
memcpy(ptr, (void *)laddr, seg.file_size());
if (size > seg.file_size())
memset((void *)((addr_t)ptr + seg.file_size()),
0, size - seg.file_size());
/*
* We store the parent information at the beginning of the first
* data segment
*/
if (!parent_info) {
Native_capability::Raw *raw = (Native_capability::Raw *)ptr;
raw->dst = parent_cap.dst();
raw->local_name = parent_cap.local_name();
parent_info = true;
}
/* detach dataspace */
local_rm.detach(base);
off_t const offset = 0;
try { remote_rm.attach_at(ds_cap, addr, size, offset); }
catch (Region_map::Attach_failed) {
PERR("remote attach of read-write segment failed"); throw; }
} else {
/* read-only segment */
if (seg.file_size() != seg.mem_size())
PWRN("filesz and memsz for read-only segment differ");
off_t const offset = seg.file_offset();
try {
if (exec)
remote_rm.attach_executable(elf_ds, addr, size, offset);
else
remote_rm.attach_at(elf_ds, addr, size, offset);
}
catch (Region_map::Attach_failed) {
PERR("remote attach of read-only segment failed"); throw; }
}
}
/* detach ELF */
local_rm.detach((void *)elf_addr);
}
Child::Process::Initial_thread::Initial_thread(Cpu_session &cpu,
Pd_session_capability pd,
char const *name)
:
cpu(cpu),
cap(cpu.create_thread(pd, Cpu_session::DEFAULT_WEIGHT, name))
{ }
Child::Process::Initial_thread::~Initial_thread()
{
cpu.kill_thread(cap);
}
Child::Process::Process(Dataspace_capability elf_ds,
Dataspace_capability ldso_ds,
Pd_session_capability pd_cap,
Pd_session &pd,
Ram_session &ram,
Cpu_session &cpu,
Region_map &local_rm,
Region_map &remote_rm,
Parent_capability parent_cap,
char const *name)
:
initial_thread(cpu, pd_cap, name),
loaded_executable(elf_ds, ldso_ds, ram, local_rm, remote_rm, parent_cap)
{
/* register parent interface for new protection domain */
pd.assign_parent(parent_cap);
/*
* Inhibit start of main thread if the new process happens to be forked
* from another. In this case, the main thread will get manually
* started after constructing the 'Process'.
*/
if (!elf_ds.valid())
return;
/* start main thread */
if (cpu.start(initial_thread.cap, loaded_executable.entry, 0)) {
PERR("start of initial thread failed");
throw Cpu_session::Thread_creation_failed();
}
}
Child::Process::~Process() { }


@@ -0,0 +1,76 @@
/*
* \brief Component bootstrap
* \author Norman Feske
* \author Christian Helmuth
* \date 2016-01-13
*/
/*
* Copyright (C) 2016 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* Genode includes */
#include <base/component.h>
#include <base/env.h>
namespace {
struct Env : Genode::Env
{
Genode::Entrypoint &_ep;
Env(Genode::Entrypoint &ep) : _ep(ep) { }
Genode::Parent &parent() override { return *Genode::env()->parent(); }
Genode::Ram_session &ram() override { return *Genode::env()->ram_session(); }
Genode::Cpu_session &cpu() override { return *Genode::env()->cpu_session(); }
Genode::Region_map &rm() override { return *Genode::env()->rm_session(); }
Genode::Pd_session &pd() override { return *Genode::env()->pd_session(); }
Genode::Entrypoint &ep() override { return _ep; }
Genode::Ram_session_capability ram_session_cap() override
{
return Genode::env()->ram_session_cap();
}
Genode::Cpu_session_capability cpu_session_cap() override
{
return Genode::env()->cpu_session_cap();
}
};
}
namespace Genode {
struct Startup;
extern void bootstrap_component();
}
/*
* We need to execute the constructor of the main entrypoint from a
* class called 'Startup' as 'Startup' is a friend of 'Entrypoint'.
*/
struct Genode::Startup
{
::Env env { ep };
/*
* The construction of the main entrypoint never returns.
*/
Entrypoint ep { env };
};
void Genode::bootstrap_component()
{
static Startup startup;
/* never reached */
}


@@ -0,0 +1,340 @@
/*
* \brief Output of format strings
* \author Norman Feske
* \date 2006-04-07
*
* NOTE: Support for long long ints is not required by Core.
* Hence, this functionality and further features such
* as floating-point numbers are better placed
* in a separate 'rich_console' lib outside of Genode's
* base repository.
*/
/*
* Copyright (C) 2006-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#include <base/console.h>
#include <base/stdint.h>
using namespace Genode;
/**
* Format string command representation
*/
class Format_command
{
public:
enum Type { INT, UINT, STRING, CHAR, PTR, PERCENT, INVALID };
enum Length { DEFAULT, LONG, SIZE_T, LONG_LONG };
private:
/**
* Read decimal value from string
*/
int decode_decimal(const char *str, int *consumed)
{
int res = 0;
while (1) {
char c = str[*consumed];
if (!c || c < '0' || c > '0' + 9)
return res;
res = (res * 10) + c - '0';
(*consumed)++;
}
}
public:
Type type = INVALID; /* format argument type */
Length length = DEFAULT; /* format argument length */
int padding = 0; /* min number of characters to print */
int base = 10; /* base of numeric arguments */
bool zeropad = false; /* pad with zero instead of space */
bool uppercase = false; /* use upper case for hex numbers */
int consumed = 0; /* nb of consumed format string chars */
/**
* Constructor
*
* \param format begin of command in format string
*/
explicit Format_command(const char *format)
{
/* check for command begin and eat the character */
if (format[consumed] != '%') return;
if (!format[++consumed]) return;
/* heading zero indicates zero-padding */
zeropad = (format[consumed] == '0');
/* read decimal padding value */
padding = decode_decimal(format, &consumed);
if (!format[consumed]) return;
/* decode length */
switch (format[consumed]) {
case 'l':
{
/* long long ints are marked by a subsequenting 'l' character */
bool is_long_long = (format[consumed + 1] == 'l');
length = is_long_long ? LONG_LONG : LONG;
consumed += is_long_long ? 2 : 1;
break;
}
case 'z':
length = SIZE_T;
consumed++;
break;
case 'p':
length = LONG;
break;
default: break;
}
if (!format[consumed]) return;
/* decode type */
switch (format[consumed]) {
case 'd':
case 'i': type = INT; base = 10; break;
case 'o': type = UINT; base = 8; break;
case 'u': type = UINT; base = 10; break;
case 'x': type = UINT; base = 16; break;
case 'X': type = UINT; base = 16; uppercase = 1; break;
case 'p': type = PTR; base = 16; break;
case 'c': type = CHAR; break;
case 's': type = STRING; break;
case '%': type = PERCENT; break;
case 0: return;
default: break;
}
/* eat type character */
consumed++;
}
int numeric()
{
return (type == INT || type == UINT || type == PTR);
}
};
/**
* Convert digit to ASCII value
*/
static char ascii(int digit, int uppercase = 0)
{
if (digit > 9)
return digit + (uppercase ? 'A' : 'a') - 10;
return digit + '0';
}
/**
* Output signed value with the specified base
*/
template <typename T>
void Console::_out_signed(T value, unsigned base)
{
/*
* For base 8, the number of digits is at most three times the number of
* value bytes (0xff is 0o377, i.e., three octal digits); any larger base
* needs fewer digits.
*/
char buf[sizeof(value)*3];
/* set flag if value is negative */
int neg = value < 0 ? 1 : 0;
/* get absolute value */
value = value < 0 ? -value : value;
int i = 0;
/* handle zero as special case */
if (value == 0)
buf[i++] = ascii(0);
/* fill buffer starting with the least significant digits */
else
for (; value > 0; value /= base)
buf[i++] = ascii(value % base);
/* add sign to buffer for negative values */
if (neg)
_out_char('-');
/* output buffer in reverse order */
for (; i--; )
_out_char(buf[i]);
}
/**
* Output unsigned value with the specified base and padding
*/
template <typename T>
void Console::_out_unsigned(T value, unsigned base, int pad)
{
/*
* For base 8, the number of digits is at most three times the number of
* value bytes (0xff is 0o377, i.e., three octal digits); any larger base
* needs fewer digits.
*/
char buf[sizeof(value)*3];
int i = 0;
/* handle zero as special case */
if (value == 0) {
buf[i++] = ascii(0);
pad--;
}
/* fill buffer starting with the least significant digits */
for (; value > 0; value /= base, pad--)
buf[i++] = ascii(value % base);
/* add padding zeros */
for (; pad-- > 0; )
_out_char(ascii(0));
/* output buffer in reverse order */
for (; i--; )
_out_char(buf[i]);
}
void Console::_out_string(const char *str)
{
if (!str)
_out_string("<NULL>");
else
while (*str) _out_char(*str++);
}
void Console::printf(const char *format, ...)
{
va_list list;
va_start(list, format);
vprintf(format, list);
va_end(list);
}
void Console::vprintf(const char *format, va_list list)
{
while (*format) {
/* eat and output plain characters */
if (*format != '%') {
_out_char(*format++);
continue;
}
/* parse format argument descriptor */
Format_command cmd(format);
/* read numeric argument from va_list */
long long numeric_arg = 0;
if (cmd.numeric()) {
switch (cmd.length) {
case Format_command::LONG_LONG:
numeric_arg = va_arg(list, long long);
break;
case Format_command::LONG:
numeric_arg = (cmd.type == Format_command::UINT) ?
(long long)va_arg(list, unsigned long) : va_arg(list, long);
break;
case Format_command::SIZE_T:
numeric_arg = va_arg(list, size_t);
break;
case Format_command::DEFAULT:
numeric_arg = (cmd.type == Format_command::UINT) ?
(long long)va_arg(list, unsigned int) : va_arg(list, int);
break;
}
}
/* call type-specific output routines */
switch (cmd.type) {
case Format_command::INT:
if (cmd.length == Format_command::LONG_LONG)
_out_signed<long long>(numeric_arg, cmd.base);
else
_out_signed<long>(numeric_arg, cmd.base);
break;
case Format_command::UINT:
if (cmd.length == Format_command::LONG_LONG) {
_out_unsigned<unsigned long long>(numeric_arg, cmd.base, cmd.padding);
break;
}
/* fall through */
case Format_command::PTR:
_out_unsigned<unsigned long>(numeric_arg, cmd.base, cmd.padding);
break;
case Format_command::CHAR:
_out_char(va_arg(list, int));
break;
case Format_command::STRING:
_out_string(va_arg(list, const char *));
break;
case Format_command::PERCENT:
_out_char('%');
break;
case Format_command::INVALID:
_out_string("<warning: unsupported format string argument>");
/* consume the argument of the unsupported command */
va_arg(list, long);
break;
}
/* proceed with format string after command */
format += cmd.consumed;
}
}
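
Given the parser above, the supported conversions can be exercised with a few calls through the 'printf' front end (declared in base/printf.h); '%08lx' hits the zero-padding path, '%llu' the LONG_LONG path, '%zu' SIZE_T, and '%p' PTR. Illustrative calls, not part of the patch:

void format_examples()
{
    Genode::printf("dec %d, padded hex %08lx\n", -42, 0xbeefUL);
    Genode::printf("long long %llu, size %zu\n", 1ULL << 40, sizeof(long));
    Genode::printf("ptr %p, str %s, char %c, percent %%\n",
                   (void *)0x1000, "hi", 'x');
}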


@@ -0,0 +1,76 @@
/*
* \brief Core-specific 'printf' implementation
* \author Norman Feske
* \date 2010-08-31
*
* In contrast to regular Genode processes, which use the platform-
* independent LOG-session interface as back end of 'printf', core has
* to rely on a platform-specific back end such as a serial driver or a
* kernel-debugger function. The platform-specific back end is called
* 'Core_console'.
*
* This file contains the generic glue code between 'printf' and
* 'Core_console'.
*/
/*
* Copyright (C) 2010-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* Genode includes */
#include <base/printf.h>
#include <base/lock.h>
/* base-internal includes */
#include <base/internal/core_console.h>
using namespace Genode;
/**
* Synchronized version of the core console
*
* This class synchronizes calls of the 'Console::vprintf' function as
* used by 'printf' and 'vprintf' to prevent multiple printf-using
* threads within core from interfering with each other.
*/
struct Synchronized_core_console : public Core_console, public Lock
{
void vprintf(const char *format, va_list list)
{
Lock::Guard lock_guard(*this);
Core_console::vprintf(format, list);
}
};
/**
* Return singleton instance of synchronized core console
*/
static Synchronized_core_console &core_console()
{
static Synchronized_core_console _console;
return _console;
}
void Genode::printf(const char *format, ...)
{
va_list list;
va_start(list, format);
core_console().vprintf(format, list);
va_end(list);
}
void Genode::vprintf(const char *format, va_list list)
{
core_console().vprintf(format, list);
}


@@ -0,0 +1,159 @@
/**
* \brief ELF binary utility
* \author Christian Helmuth
* \date 2006-05-04
*/
/*
* Copyright (C) 2006-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* Genode includes */
#include <base/printf.h>
#include <util/string.h>
/* base-internal includes */
#include <base/internal/elf_format.h>
#include <base/internal/elf.h>
using namespace Genode;
int Elf_binary::_ehdr_check_compat()
{
Elf_Ehdr *ehdr = (Elf_Ehdr *)_start;
if (memcmp(ehdr, ELFMAG, SELFMAG) != 0) {
PERR("binary is not an ELF");
return -1;
}
if (ehdr->e_ident[EI_CLASS] != ELFCLASS) {
PERR("support for 32/64-bit objects only");
return -1;
}
/* only start executables and shared objects that have entry points */
if (!(ehdr->e_type == ET_EXEC || (ehdr->e_type == ET_DYN && ehdr->e_entry))) {
PERR("program is no executable");
return -1;
}
return 0;
}
bool inline Elf_binary::_dynamic_check_compat(unsigned type)
{
switch (type) {
case PT_NULL:
case PT_LOAD:
case PT_DYNAMIC:
case PT_INTERP:
case PT_PHDR:
case PT_GNU_EH_FRAME:
case PT_GNU_STACK:
case PT_GNU_RELRO:
case PT_TLS:
case PT_NOTE:
return true;
default:
break;
}
if (type >= PT_LOPROC && type <= PT_HIPROC)
return true;
return false;
}
int Elf_binary::_ph_table_check_compat()
{
Elf_Phdr *ph_table = (Elf_Phdr *)_ph_table;
unsigned num = _phnum;
unsigned i;
for (i = 0; i < num; i++) {
if (!_dynamic_check_compat(ph_table[i].p_type) /* ignored */) {
PWRN("unsupported program segment type 0x%x", ph_table[i].p_type);
return -1;
}
if (ph_table[i].p_type == PT_LOAD)
if (ph_table[i].p_align & (0x1000 - 1)) {
PWRN("unsupported alignment 0x%lx", (unsigned long) ph_table[i].p_align);
return -1;
}
if (ph_table[i].p_type == PT_DYNAMIC)
_dynamic = true;
if (ph_table[i].p_type == PT_INTERP) {
Elf_Phdr *phdr = &((Elf_Phdr *)_ph_table)[i];
char *interp = (char *)(_start + phdr->p_offset);
if (!strcmp(interp, "ld.lib.so"))
_interp = true;
}
}
return 0;
}
Elf_segment Elf_binary::get_segment(unsigned num)
{
void *start;
size_t offset, filesz, memsz;
Elf_binary::Flags flags = { 0, 0, 0, 0 };
if (!valid()) return Elf_segment();
if (!(num < _phnum)) return Elf_segment();
Elf_Phdr *phdr = &((Elf_Phdr *)_ph_table)[num];
start = (void *)phdr->p_vaddr;
offset = phdr->p_offset;
filesz = phdr->p_filesz;
memsz = phdr->p_memsz;
flags.r = (phdr->p_flags & PF_R) ? 1 : 0;
flags.w = (phdr->p_flags & PF_W) ? 1 : 0;
flags.x = (phdr->p_flags & PF_X) ? 1 : 0;
/*
* Skip loading of ELF segments that are not PT_LOAD or have no memory
* size.
*/
if (phdr->p_type != PT_LOAD || !memsz)
flags.skip = 1;
return Elf_segment(this, start, offset, filesz, memsz, flags);
}
Elf_binary::Elf_binary(addr_t start)
: _valid(false), _dynamic(false), _interp(false), _start(start)
{
Elf_Ehdr *ehdr = (Elf_Ehdr *)start;
/* check for unsupported ELF features */
if (_ehdr_check_compat()) return;
/* program entry point */
if (!(_entry = ehdr->e_entry)) return;
/* segment tables */
_ph_table = _start + ehdr->e_phoff;
_phentsize = ehdr->e_phentsize;
_phnum = ehdr->e_phnum;
/* program segments */
if (_ph_table_check_compat()) return;
/* ready to rock */
_valid = true;
}
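
A typical consumer walks the program headers the same way 'Child::Process' does; in sketch form (assuming the ELF image is already attached at 'elf_addr' in the local address space):

void map_segments(Genode::addr_t elf_addr)
{
    Elf_binary elf(elf_addr);  /* parses and validates the attached image */
    if (!elf.valid())
        return;

    Elf_segment seg;
    for (unsigned n = 0; (seg = elf.get_segment(n)).valid(); ++n) {
        if (seg.flags().skip)
            continue;  /* not PT_LOAD or zero memory size */

        /* attach seg.start()..seg.mem_size() according to seg.flags().w/x */
    }
}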

View File

@@ -0,0 +1,191 @@
/*
* \brief Entrypoint for serving RPC requests and dispatching signals
* \author Norman Feske
* \author Christian Helmuth
* \date 2015-12-17
*/
/*
* Copyright (C) 2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#include <base/entrypoint.h>
#include <base/component.h>
#include <cap_session/connection.h>
#include <util/retry.h>
using namespace Genode;
/*
* XXX move declarations to base-internal headers
*/
namespace Genode {
extern bool inhibit_tracing;
void call_global_static_constructors();
void init_signal_thread();
void destroy_signal_thread();
extern void (*call_component_construct)(Genode::Env &);
}
void Entrypoint::_dispatch_signal(Signal &sig)
{
Signal_dispatcher_base *dispatcher = 0;
dispatcher = dynamic_cast<Signal_dispatcher_base *>(sig.context());
if (!dispatcher)
return;
dispatcher->dispatch(sig.num());
}
void Entrypoint::_process_incoming_signals()
{
for (;;) {
do {
_sig_rec->block_for_signal();
/*
* It might happen that we try to forward a signal to the
* entrypoint, while the context of that signal is already
* destroyed. In that case, we will get an IPC error exception
* as a result, which has to be caught.
*/
retry<Genode::Blocking_canceled>(
[&] () { _signal_proxy_cap.call<Signal_proxy::Rpc_signal>(); },
[] () { PWRN("blocking canceled during signal processing"); }
);
} while (!_suspended_callback);
_suspend_dispatcher.destruct();
_sig_rec.destruct();
dissolve(_signal_proxy);
_signal_proxy_cap = Capability<Signal_proxy>();
_rpc_ep.destruct();
destroy_signal_thread();
/* execute fork magic in noux plugin */
_suspended_callback();
init_signal_thread();
_rpc_ep.construct(&_env.pd(), Component::stack_size(), Component::name());
_signal_proxy_cap = manage(_signal_proxy);
_sig_rec.construct();
/*
* Before calling the resumed callback, we reset the callback pointer
* as these may be set again in the resumed code to initiate the next
* suspend-resume cycle (e.g., exit()).
*/
void (*resumed_callback)() = _resumed_callback;
_suspended_callback = nullptr;
_resumed_callback = nullptr;
resumed_callback();
}
}
void Entrypoint::schedule_suspend(void (*suspended)(), void (*resumed)())
{
_suspended_callback = suspended;
_resumed_callback = resumed;
/*
* We always construct the dispatcher when the suspend is scheduled and
* destruct it when the suspend is executed.
*/
_suspend_dispatcher.construct(*this, *this, &Entrypoint::_handle_suspend);
/* trigger wakeup of the signal-dispatch loop for suspend */
Genode::Signal_transmitter(*_suspend_dispatcher).submit();
}
Signal_context_capability Entrypoint::manage(Signal_dispatcher_base &dispatcher)
{
return _sig_rec->manage(&dispatcher);
}
void Genode::Entrypoint::dissolve(Signal_dispatcher_base &dispatcher)
{
_sig_rec->dissolve(&dispatcher);
}
namespace {
struct Constructor
{
GENODE_RPC(Rpc_construct, void, construct);
GENODE_RPC_INTERFACE(Rpc_construct);
};
struct Constructor_component : Rpc_object<Constructor, Constructor_component>
{
Env &env;
Constructor_component(Env &env) : env(env) { }
void construct()
{
/* enable tracing support */
Genode::inhibit_tracing = false;
Genode::call_global_static_constructors();
Genode::call_component_construct(env);
}
};
}
Entrypoint::Entrypoint(Env &env)
:
_env(env),
_rpc_ep(&env.pd(), Component::stack_size(), Component::name())
{
/* initialize signalling after constructing the entrypoint but before calling into the component */
init_signal_thread();
/*
* Invoke Component::construct function in the context of the entrypoint.
*/
Constructor_component constructor(env);
Capability<Constructor> constructor_cap =
_rpc_ep->manage(&constructor);
try {
constructor_cap.call<Constructor::Rpc_construct>();
} catch (Genode::Blocking_canceled) {
PWRN("blocking canceled in entrypoint constructor");
}
_rpc_ep->dissolve(&constructor);
/*
* The calling initial thread becomes the signal proxy thread for this
* entrypoint
*/
_process_incoming_signals();
}
Entrypoint::Entrypoint(Env &env, size_t stack_size, char const *name)
:
_env(env),
_rpc_ep(&env.pd(), stack_size, name)
{
_signal_proxy_thread.construct(*this);
}
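
The initial entrypoint above never returns; a component plugs in via the 'Component' hooks referenced in this file ('stack_size', 'name', and the 'construct' function invoked through 'call_component_construct'). A minimal component sketch against these hooks (assuming they are meant to be defined by the component, as the references suggest):

#include <base/component.h>
#include <base/printf.h>

Genode::size_t Component::stack_size() { return 16*1024*sizeof(long); }
char const *   Component::name()       { return "example"; }

void Component::construct(Genode::Env &)
{
    /* executed in the context of the initial entrypoint (see above) */
    Genode::printf("component '%s' constructed\n", Component::name());
}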


@@ -0,0 +1,77 @@
/*
* \brief Environment initialization
* \author Norman Feske
* \author Christian Helmuth
* \date 2006-07-27
*/
/*
* Copyright (C) 2006-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#include <base/internal/platform_env.h>
namespace Genode {
/*
* Request pointer to static environment of the Genode application
*/
Env_deprecated *env()
{
/*
* By placing the environment as static object here, we ensure that its
* constructor gets called when this function is used the first time.
*/
static Genode::Platform_env _env;
return &_env;
}
}
static Genode::Signal_receiver *resource_sig_rec()
{
static Genode::Signal_receiver sig_rec;
return &sig_rec;
}
Genode::Signal_context_capability
Genode::Expanding_parent_client::_fallback_sig_cap()
{
static Signal_context _sig_ctx;
static Signal_context_capability _sig_cap;
/* create signal-context capability only once */
if (!_sig_cap.valid()) {
/*
* Because the 'manage' function consumes meta data of the signal
* session, calling it may result in an 'Out_of_metadata' error. The
* 'manage' function handles this error by upgrading the session quota
* accordingly. However, this upgrade, in turn, may result in the
* depletion of the process' RAM quota. In this case, the process would
* issue a resource request to the parent. But in order to do so, the
* fallback signal handler has to be constructed. To solve this
* chicken-and-egg problem, we allocate a so-called emergency RAM reserve
* immediately at the startup of the process as part of the
* 'Platform_env'. When initializing the fallback signal handler, these
* resources get released in order to ensure an eventual upgrade of the
* signal session to succeed.
*
* The corner case is tested by 'os/src/test/resource_request'.
*/
_emergency_ram_reserve.release();
_sig_cap = resource_sig_rec()->manage(&_sig_ctx);
}
return _sig_cap;
}
void Genode::Expanding_parent_client::_wait_for_resource_response()
{
resource_sig_rec()->wait_for_signal();
}


@@ -0,0 +1,93 @@
/*
* \brief Environment reinitialization
* \author Norman Feske
* \author Martin Stein
* \date 2012-02-16
*/
/*
* Copyright (C) 2012-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* Genode includes */
#include <util/construct_at.h>
#include <rm_session/connection.h>
/* base-internal includes */
#include <base/internal/platform_env.h>
#include <base/internal/crt0.h>
void prepare_reinit_main_thread();
void reinit_main_thread();
namespace Genode { extern bool inhibit_tracing; }
void Genode::Platform_env::reinit(Native_capability::Dst dst,
long local_name)
{
/*
* This function is unused during the normal operation of Genode. It is
* relevant only for implementing fork semantics such as provided by the
* Noux execution environment.
*
* The function is called by the freshly created process right after the
* fork happened.
*
* The existing 'Platform_env' object contains capabilities that are
* meaningful for the forking process but not the new process. Before the
* environment can be used, it must be reinitialized with the resources
* provided by the actual parent.
*/
/* avoid RPCs by the tracing framework as long as we have no valid env */
inhibit_tracing = true;
/* do platform specific preparation */
prepare_reinit_main_thread();
/*
* Patch new parent capability into the original location as specified by
* the linker script.
*/
Native_capability::Raw *raw = (Native_capability::Raw *)(&_parent_cap);
raw->dst = dst;
raw->local_name = local_name;
/*
* Re-initialize 'Platform_env' members
*/
Expanding_parent_client * const p = &_parent_client;
construct_at<Expanding_parent_client>(p, parent_cap(), *this);
construct_at<Resources>(&_resources, _parent_client);
/*
* Keep information about dynamically allocated memory but use the new
* resources as backing store. Note that the capabilities of the already
* allocated backing-store dataspaces are rendered meaningless. But this is
* no problem because they are used by the 'Heap' destructor only, which is
* never called for the heap instance of 'Platform_env'.
*/
_heap.reassign_resources(&_resources.ram, &_resources.rm);
}
void
Genode::Platform_env::
reinit_main_thread(Capability<Region_map> &stack_area_rm)
{
/* reinitialize stack area RM session */
Region_map * const rms = env_stack_area_region_map;
Region_map_client * const rmc = dynamic_cast<Region_map_client *>(rms);
construct_at<Region_map_client>(rmc, stack_area_rm);
/* reinitialize main-thread object */
::reinit_main_thread();
/* re-enable tracing */
inhibit_tracing = false;
}


@@ -0,0 +1,292 @@
/*
* \brief Implementation of Genode heap partition
* \author Norman Feske
* \date 2006-05-17
*/
/*
* Copyright (C) 2006-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#include <util/construct_at.h>
#include <base/env.h>
#include <base/printf.h>
#include <base/heap.h>
#include <base/lock.h>
using namespace Genode;
namespace {
enum {
MIN_CHUNK_SIZE = 4*1024, /* in machine words */
MAX_CHUNK_SIZE = 256*1024,
/*
* Allocation sizes >= this value are considered as big
* allocations, which get their own dataspace. In contrast
* to smaller allocations, this memory is released to
* the RAM session when 'free()' is called.
*/
BIG_ALLOCATION_THRESHOLD = 64*1024 /* in bytes */
};
}
Heap::Dataspace_pool::~Dataspace_pool()
{
/* free all ram_dataspaces */
for (Dataspace *ds; (ds = first()); ) {
/*
* read dataspace capability and modify _ds_list before detaching
* possible backing store for Dataspace - we rely on LIFO list
* manipulation here!
*/
Ram_dataspace_capability ds_cap = ds->cap;
void *ds_local_addr = ds->local_addr;
remove(ds);
/*
* Call 'Dataspace' destructor to properly release the RAM dataspace
* capabilities. Note that we don't free the 'Dataspace' object at the
* local allocator because this is already done by the 'Heap'
* destructor prior executing the 'Dataspace_pool' destructor.
*/
ds->~Dataspace();
region_map->detach(ds_local_addr);
ram_session->free(ds_cap);
}
}
int Heap::quota_limit(size_t new_quota_limit)
{
if (new_quota_limit < _quota_used) return -1;
_quota_limit = new_quota_limit;
return 0;
}
Heap::Dataspace *Heap::_allocate_dataspace(size_t size, bool enforce_separate_metadata)
{
Ram_dataspace_capability new_ds_cap;
void *ds_addr = 0;
void *ds_meta_data_addr = 0;
Heap::Dataspace *ds = 0;
/* make new ram dataspace available at our local address space */
try {
new_ds_cap = _ds_pool.ram_session->alloc(size);
ds_addr = _ds_pool.region_map->attach(new_ds_cap);
} catch (Ram_session::Alloc_failed) {
PWRN("could not allocate new dataspace of size %zu", size);
return 0;
} catch (Region_map::Attach_failed) {
PWRN("could not attach dataspace");
_ds_pool.ram_session->free(new_ds_cap);
return 0;
}
if (enforce_separate_metadata) {
/* allocate the Dataspace structure */
if (_unsynchronized_alloc(sizeof(Heap::Dataspace), &ds_meta_data_addr) < 0) {
PWRN("could not allocate dataspace meta data");
return 0;
}
} else {
/* add new local address range to our local allocator */
_alloc->add_range((addr_t)ds_addr, size);
/* allocate the Dataspace structure */
if (_alloc->alloc_aligned(sizeof(Heap::Dataspace), &ds_meta_data_addr, log2(sizeof(addr_t))).is_error()) {
PWRN("could not allocate dataspace meta data - this should never happen");
return 0;
}
}
ds = construct_at<Dataspace>(ds_meta_data_addr, new_ds_cap, ds_addr, size);
_ds_pool.insert(ds);
return ds;
}
bool Heap::_try_local_alloc(size_t size, void **out_addr)
{
if (_alloc->alloc_aligned(size, out_addr, log2(sizeof(addr_t))).is_error())
return false;
_quota_used += size;
return true;
}
bool Heap::_unsynchronized_alloc(size_t size, void **out_addr)
{
size_t dataspace_size;
if (size >= BIG_ALLOCATION_THRESHOLD) {
/*
* big allocation
*
* in this case, we allocate one dataspace without any meta data in it
* and return its local address without going through the allocator.
*/
/* align to 4K page */
dataspace_size = align_addr(size, 12);
Heap::Dataspace *ds = _allocate_dataspace(dataspace_size, true);
if (!ds) {
PWRN("could not allocate dataspace");
return false;
}
_quota_used += ds->size;
*out_addr = ds->local_addr;
return true;
}
/* try allocation at our local allocator */
if (_try_local_alloc(size, out_addr))
return true;
/*
* Calculate block size of needed backing store. The block must hold the
* requested 'size' and we add some space for meta data
* ('Dataspace' structures, AVL-node slab blocks).
* Finally, we align the size to a 4K page.
*/
dataspace_size = size + Allocator_avl::slab_block_size() + sizeof(Heap::Dataspace);
/*
 * '_chunk_size' is a multiple of 4K, so the resulting 'request_size'
 * in bytes is 4K-aligned, too.
 */
size_t const request_size = _chunk_size * sizeof(umword_t);
if ((dataspace_size < request_size) &&
_allocate_dataspace(request_size, false)) {
/*
* Exponentially increase chunk size with each allocated chunk until
* we hit 'MAX_CHUNK_SIZE'.
*/
_chunk_size = min(2*_chunk_size, (size_t)MAX_CHUNK_SIZE);
} else {
/* align to 4K page */
dataspace_size = align_addr(dataspace_size, 12);
if (!_allocate_dataspace(dataspace_size, false))
return false;
}
/* allocate originally requested block */
return _try_local_alloc(size, out_addr);
}
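/*
 * For illustration (not part of the original code): with 'MIN_CHUNK_SIZE'
 * of 4*1024 machine words, the first chunk requested on a 64-bit machine
 * amounts to
 *
 * ! 4*1024 words * 8 bytes/word = 32 KiB
 *
 * Each successful chunk allocation doubles '_chunk_size' until it reaches
 * 'MAX_CHUNK_SIZE' (256*1024 words, i.e., 2 MiB on 64 bit).
 */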
bool Heap::alloc(size_t size, void **out_addr)
{
/* serialize access of heap functions */
Lock::Guard lock_guard(_lock);
/* check requested allocation against quota limit */
if (size + _quota_used > _quota_limit)
return false;
return _unsynchronized_alloc(size, out_addr);
}
void Heap::free(void *addr, size_t size)
{
/* serialize access of heap functions */
Lock::Guard lock_guard(_lock);
if (size >= BIG_ALLOCATION_THRESHOLD) {
Heap::Dataspace *ds;
for (ds = _ds_pool.first(); ds; ds = ds->next())
if (((addr_t)addr >= (addr_t)ds->local_addr) &&
((addr_t)addr <= (addr_t)ds->local_addr + ds->size - 1))
break;
_ds_pool.remove(ds);
_ds_pool.region_map->detach(ds->local_addr);
_ds_pool.ram_session->free(ds->cap);
_quota_used -= ds->size;
destroy(*_alloc, ds);
} else {
/*
* forward request to our local allocator
*/
_alloc->free(addr, size);
_quota_used -= size;
}
}
Heap::Heap(Ram_session *ram_session,
Region_map *region_map,
size_t quota_limit,
void *static_addr,
size_t static_size)
:
_alloc(nullptr),
_ds_pool(ram_session, region_map),
_quota_limit(quota_limit), _quota_used(0),
_chunk_size(MIN_CHUNK_SIZE)
{
if (static_addr)
_alloc->add_range((addr_t)static_addr, static_size);
}
Heap::~Heap()
{
/*
* Revert allocations of heap-internal 'Dataspace' objects. Otherwise, the
* subsequent destruction of the 'Allocator_avl' would detect those blocks
* as dangling allocations.
*
* Since no new allocations can occur at the destruction time of the
* 'Heap', it is safe to release the 'Dataspace' objects at the allocator
* yet still access them afterwards during the destruction of the
* 'Allocator_avl'.
*/
for (Heap::Dataspace *ds = _ds_pool.first(); ds; ds = ds->next())
_alloc->free(ds, sizeof(Dataspace));
/*
* Destruct 'Allocator_avl' before destructing the dataspace pool. This
* order is important because some dataspaces of the dataspace pool are
* used as backing store for the allocator's meta data. If we destroyed
* the dataspace pool before the allocator, the subsequent attempt to destruct
* the allocator would access no-longer-present backing store.
*/
_alloc.destruct();
}
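/*
 * Usage sketch (illustrative; 'ram' and 'rm' are placeholders for the
 * component's RAM session and region map): a heap limited to 64 KiB of
 * quota.
 *
 * ! Heap heap(ram, rm, 64*1024);
 * !
 * ! void *ptr = nullptr;
 * ! if (heap.alloc(256, &ptr))
 * !     heap.free(ptr, 256);
 */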

View File

@ -0,0 +1,178 @@
/*
* \brief Lock implementation
* \author Norman Feske
* \date 2009-03-25
*/
/*
* Copyright (C) 2009-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* Genode includes */
#include <base/cancelable_lock.h>
#include <cpu/memory_barrier.h>
/* base-internal includes */
#include <base/internal/spin_lock.h>
using namespace Genode;
static inline Genode::Thread_base *invalid_thread_base()
{
return (Genode::Thread_base*)~0;
}
static inline bool thread_base_valid(Genode::Thread_base *thread_base)
{
return (thread_base != invalid_thread_base());
}
/********************
** Lock applicant **
********************/
void Cancelable_lock::Applicant::wake_up()
{
if (!thread_base_valid(_thread_base)) return;
/*
* Deal with the race that may occur in the 'lock' function between
* releasing the spinlock and blocking myself.
*/
for (;;) {
if (thread_check_stopped_and_restart(_thread_base))
return;
thread_switch_to(_thread_base);
}
}
/*********************
** Cancelable lock **
*********************/
void Cancelable_lock::lock()
{
Applicant myself(Thread_base::myself());
spinlock_lock(&_spinlock_state);
/* reset ownership if the same thread calls 'lock' twice */
if (_owner == myself)
_owner = Applicant(invalid_thread_base());
if (cmpxchg(&_state, UNLOCKED, LOCKED)) {
/* we got the lock */
_owner = myself;
_last_applicant = &_owner;
spinlock_unlock(&_spinlock_state);
return;
}
/*
* We failed to grab the lock, so let's add ourselves to the
* list of applicants and block for the current lock holder.
*/
_last_applicant->applicant_to_wake_up(&myself);
_last_applicant = &myself;
spinlock_unlock(&_spinlock_state);
/*
* At this point, a race can happen. We have added ourselves to the wait
* queue but do not block yet. If we get preempted here, the lock holder
* may call 'unlock' and thereby find us as the next applicant to wake up.
* However, the 'L4_Start' call will then be issued before we went to sleep
* via 'L4_Stop'. When we get scheduled for the next time, we are expected
* to enter the critical section but we will execute 'L4_Stop' instead.
* We handle this case in the 'unlock' function by checking the previous
* thread state when resuming its execution.
*
* Note for testing: To artificially increase the chance for triggering the
* race condition, we can delay the execution here. For example via:
*
* ! for (int i = 0; i < 10; i++)
* ! thread_yield();
*/
thread_stop_myself();
/*
* We expect to be the lock owner when woken up. If this is not
* the case, the blocking was canceled via core's cancel-blocking
* mechanism. We have to dequeue ourselves from the list of applicants
* and reflect this condition as a C++ exception.
*/
spinlock_lock(&_spinlock_state);
if (_owner != myself) {
/*
* Check if we are the applicant to be woken up next,
* otherwise, go through the list of remaining applicants
*/
for (Applicant *a = &_owner; a; a = a->applicant_to_wake_up()) {
/* remove reference to ourself from the applicants list */
if (a->applicant_to_wake_up() == &myself) {
a->applicant_to_wake_up(myself.applicant_to_wake_up());
if (_last_applicant == &myself)
_last_applicant = a;
break;
}
}
spinlock_unlock(&_spinlock_state);
throw Blocking_canceled();
}
spinlock_unlock(&_spinlock_state);
}
void Cancelable_lock::unlock()
{
spinlock_lock(&_spinlock_state);
Applicant *next_owner = _owner.applicant_to_wake_up();
if (next_owner) {
/* transfer lock ownership to next applicant and wake him up */
_owner = *next_owner;
if (_last_applicant == next_owner)
_last_applicant = &_owner;
spinlock_unlock(&_spinlock_state);
_owner.wake_up();
} else {
/* there is no further applicant, leave the lock alone */
_owner = Applicant(invalid_thread_base());
_last_applicant = 0;
_state = UNLOCKED;
spinlock_unlock(&_spinlock_state);
}
}
Cancelable_lock::Cancelable_lock(Cancelable_lock::State initial)
:
_spinlock_state(SPINLOCK_UNLOCKED),
_state(UNLOCKED),
_last_applicant(0),
_owner(invalid_thread_base())
{
if (initial == LOCKED)
lock();
}
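/*
 * Usage sketch (illustrative): a thread that enters a critical section
 * and copes with core's cancel-blocking mechanism.
 *
 * ! Cancelable_lock lock;
 * !
 * ! try {
 * !     lock.lock();
 * !     // critical section
 * !     lock.unlock();
 * ! } catch (Blocking_canceled) { }
 */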

View File

@ -0,0 +1,136 @@
/*
* \brief Printf backend for the LOG interface
* \author Norman Feske
* \date 2006-09-15
*/
/*
* Copyright (C) 2006-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#include <log_session/connection.h>
#include <base/printf.h>
#include <base/console.h>
#include <base/lock.h>
#include <base/env.h>
#include <base/internal/unmanaged_singleton.h>
using namespace Genode;
class Log_console : public Console
{
private:
enum { _BUF_SIZE = Log_session::MAX_STRING_LEN };
Log_connection _log;
char _buf[_BUF_SIZE];
unsigned _num_chars;
Lock _lock;
void _flush()
{
/* null-terminate string */
_buf[_num_chars] = 0;
_log.write(_buf);
/* restart with empty buffer */
_num_chars = 0;
}
protected:
void _out_char(char c)
{
/* ensure enough buffer space for complete escape sequence */
if ((c == 27) && (_num_chars + 8 > _BUF_SIZE)) _flush();
_buf[_num_chars++] = c;
/* flush immediately on line break */
if (c == '\n' || _num_chars >= sizeof(_buf) - 1)
_flush();
}
public:
/**
* Constructor
*/
Log_console()
:
_num_chars(0)
{ }
/**
* Console interface
*/
void vprintf(const char *format, va_list list)
{
Lock::Guard lock_guard(_lock);
Console::vprintf(format, list);
}
/**
* Return LOG session interface
*/
Log_session *log_session() { return &_log; }
/**
* Re-establish LOG session
*/
void reconnect()
{
/*
* Note that the destructor of old 'Log_connection' is not called.
* This is not needed because the only designated use of this
* function is the startup procedure of noux processes created
* via fork. At the point of calling this function, the new child
* has no valid capability to the original LOG session anyway.
*/
new (&_log) Log_connection;
}
};
/*
* In the presence of a libc, we use the libc's full printf implementation
* and the 'Log_console' as its backend.
*/
static Log_console *stdout_log_console() { return unmanaged_singleton<Log_console>(); }
/**
* Hook for supporting libc back ends for stdio
*/
extern "C" int stdout_write(const char *s)
{
return stdout_log_console()->log_session()->write(s);
}
/**
* Hook for supporting the 'fork' implementation of the noux libc backend
*/
extern "C" void stdout_reconnect() { stdout_log_console()->reconnect(); }
void Genode::printf(const char *format, ...)
{
va_list list;
va_start(list, format);
vprintf(format, list);
va_end(list);
}
void Genode::vprintf(const char *format, va_list list)
{
stdout_log_console()->vprintf(format, list);
}
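/*
 * Illustrative usage: output produced via 'Genode::printf' ends up at the
 * 'Log_console' singleton above and is flushed to the LOG session on each
 * line break (or when the buffer fills up).
 *
 * ! Genode::printf("allocated %zu bytes at %p\n", size, ptr);
 */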

View File

@ -0,0 +1,44 @@
/*
* \brief Client-side stub for region map
* \author Norman Feske
* \date 2016-01-22
*/
/*
* Copyright (C) 2016 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#include <region_map/client.h>
using namespace Genode;
Region_map_client::Region_map_client(Capability<Region_map> cap)
: Rpc_client<Region_map>(cap) { }
Region_map::Local_addr
Region_map_client::attach(Dataspace_capability ds, size_t size, off_t offset,
bool use_local_addr, Local_addr local_addr,
bool executable)
{
return call<Rpc_attach>(ds, size, offset, use_local_addr, local_addr,
executable);
}
void Region_map_client::detach(Local_addr local_addr) {
call<Rpc_detach>(local_addr); }
void Region_map_client::fault_handler(Signal_context_capability cap) {
call<Rpc_fault_handler>(cap); }
Region_map::State Region_map_client::state() { return call<Rpc_state>(); }
Dataspace_capability Region_map_client::dataspace() { return call<Rpc_dataspace>(); }
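/*
 * Illustrative sketch ('rm_cap' and 'ds' are placeholders for a valid
 * region-map capability and dataspace capability): attaching a dataspace
 * at a region-map-chosen address and detaching it again.
 *
 * ! Region_map_client rm(rm_cap);
 * ! void *local = rm.attach(ds);
 * ! rm.detach(local);
 */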

View File

@ -0,0 +1,29 @@
/*
* \brief Client-side stub for RM session
* \author Norman Feske
* \date 2016-04-19
*/
/*
* Copyright (C) 2016 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#include <rm_session/client.h>
using namespace Genode;
Rm_session_client::Rm_session_client(Capability<Rm_session> cap)
: Rpc_client<Rm_session>(cap) { }
Capability<Region_map> Rm_session_client::create(size_t size) {
return call<Rpc_create>(size); }
void Rm_session_client::destroy(Capability<Region_map> cap) {
call<Rpc_destroy>(cap); }

View File

@ -0,0 +1,44 @@
/*
* \brief RPC entrypoint support for allocating RPC object capabilities
* \author Norman Feske
* \date 2016-01-19
*/
/*
* Copyright (C) 2016 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* Genode includes */
#include <base/env.h>
#include <util/retry.h>
#include <base/rpc_server.h>
#include <pd_session/client.h>
using namespace Genode;
Native_capability Rpc_entrypoint::_alloc_rpc_cap(Pd_session &pd,
Native_capability ep, addr_t)
{
Untyped_capability new_obj_cap =
retry<Genode::Pd_session::Out_of_metadata>(
[&] () { return pd.alloc_rpc_cap(_cap); },
[&] () {
Pd_session_client *client =
dynamic_cast<Pd_session_client*>(&pd);
if (client)
env()->parent()->upgrade(*client, "ram_quota=16K");
});
return new_obj_cap;
}
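/*
 * The retry-with-upgrade pattern used above is applicable whenever a
 * session may run out of meta-data quota. A minimal sketch of the pattern
 * ('Some_session', 'session', and 'session_cap' are placeholders):
 *
 * ! retry<Some_session::Out_of_metadata>(
 * !     [&] () { return session.operation(); },
 * !     [&] () { env()->parent()->upgrade(session_cap, "ram_quota=8K"); });
 */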
void Rpc_entrypoint::_free_rpc_cap(Pd_session &pd, Native_capability cap)
{
return pd.free_rpc_cap(cap);
}

View File

@ -0,0 +1,86 @@
/*
* \brief Default version of platform-specific part of RPC framework
* \author Norman Feske
* \date 2006-05-12
*/
/*
* Copyright (C) 2006-2016 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* Genode includes */
#include <util/retry.h>
#include <base/rpc_server.h>
#include <base/printf.h>
/* base-internal includes */
#include <base/internal/ipc_server.h>
using namespace Genode;
/***********************
** Server entrypoint **
***********************/
Untyped_capability Rpc_entrypoint::_manage(Rpc_object_base *obj)
{
Untyped_capability new_obj_cap = _alloc_rpc_cap(_pd_session, _cap);
/* add server object to object pool */
obj->cap(new_obj_cap);
insert(obj);
/* return capability that uses the object id as badge */
return new_obj_cap;
}
void Rpc_entrypoint::entry()
{
Ipc_server srv;
_cap = srv;
_cap_valid.unlock();
/*
* Now, the capability of the server activation is initialized
* and can be passed around. However, the processing of capability
* invocations should not happen until the activation-using server
* is completely initialized. Thus, we wait until the activation
* gets explicitly unblocked by calling 'Rpc_entrypoint::activate()'.
*/
_delay_start.lock();
Rpc_exception_code exc = Rpc_exception_code(Rpc_exception_code::INVALID_OBJECT);
while (!_exit_handler.exit) {
Rpc_request const request = ipc_reply_wait(_caller, exc, _snd_buf, _rcv_buf);
_caller = request.caller;
Ipc_unmarshaller unmarshaller(_rcv_buf);
Rpc_opcode opcode(0);
unmarshaller.extract(opcode);
/* set default return value */
exc = Rpc_exception_code(Rpc_exception_code::INVALID_OBJECT);
_snd_buf.reset();
apply(request.badge, [&] (Rpc_object_base *obj)
{
if (!obj) { return; }
try { exc = obj->dispatch(opcode, unmarshaller, _snd_buf); }
catch(Blocking_canceled&) { }
});
}
/* answer exit call, thereby wake up '~Rpc_entrypoint' */
Msgbuf<16> snd_buf;
ipc_reply(_caller, Rpc_exception_code(Rpc_exception_code::SUCCESS), snd_buf);
/* defer the destruction of 'Ipc_server' until '~Rpc_entrypoint' is ready */
_delay_exit.lock();
}

View File

@ -0,0 +1,112 @@
/*
* \brief Platform-independent part of server-side RPC framework
* \author Norman Feske
* \date 2006-05-12
*/
/*
* Copyright (C) 2006-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* Genode includes */
#include <base/rpc_server.h>
#include <base/rpc_client.h>
#include <base/blocking.h>
#include <base/env.h>
/* base-internal includes */
#include <base/internal/ipc_server.h>
using namespace Genode;
void Rpc_entrypoint::_dissolve(Rpc_object_base *obj)
{
/* make sure nobody is able to find this object */
remove(obj);
_free_rpc_cap(_pd_session, obj->cap());
/* effectively invalidate the capability used before */
obj->cap(Untyped_capability());
/* now the object may be safely destructed */
}
void Rpc_entrypoint::_block_until_cap_valid()
{
_cap_valid.lock();
}
void Rpc_entrypoint::reply_signal_info(Untyped_capability reply_cap,
unsigned long imprint, unsigned long cnt)
{
Msgbuf<sizeof(Signal_source::Signal)> snd_buf;
snd_buf.insert(Signal_source::Signal(imprint, cnt));
ipc_reply(reply_cap, Rpc_exception_code(Rpc_exception_code::SUCCESS), snd_buf);
}
void Rpc_entrypoint::activate()
{
_delay_start.unlock();
}
bool Rpc_entrypoint::is_myself() const
{
return (Thread_base::myself() == this);
}
Rpc_entrypoint::Rpc_entrypoint(Pd_session *pd_session, size_t stack_size,
char const *name, bool start_on_construction,
Affinity::Location location)
:
Thread_base(Cpu_session::DEFAULT_WEIGHT, name, stack_size, location),
_cap(Untyped_capability()),
_cap_valid(Lock::LOCKED), _delay_start(Lock::LOCKED),
_delay_exit(Lock::LOCKED),
_pd_session(*pd_session)
{
Thread_base::start();
_block_until_cap_valid();
if (start_on_construction)
activate();
_exit_cap = manage(&_exit_handler);
}
Rpc_entrypoint::~Rpc_entrypoint()
{
/*
* We have to make sure the server loop is running, which is only the case
* if the 'Rpc_entrypoint' was activated before we execute the RPC call.
*/
_delay_start.unlock();
/* leave server loop */
_exit_cap.call<Exit::Rpc_exit>();
dissolve(&_exit_handler);
if (!empty())
PWRN("Object pool not empty in %s", __func__);
/*
* Now that we finished the 'dissolve' steps above (which need a working
* 'Ipc_server' in the context of the entrypoint thread), we can allow the
* entrypoint thread to leave the scope. Thereby, the 'Ipc_server' object
* will get destructed.
*/
_delay_exit.unlock();
join();
}
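/*
 * Illustrative sketch of the entrypoint life cycle implemented above
 * ('Some_object' and the constructor arguments are placeholders): an RPC
 * object becomes remotely accessible via 'manage' and is removed from the
 * entrypoint via 'dissolve'.
 *
 * ! Rpc_entrypoint ep(&pd_session, STACK_SIZE, "my_ep");
 * ! Some_object obj;
 * ! ep.manage(&obj);
 * ! ...
 * ! ep.dissolve(&obj);
 */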

View File

@ -0,0 +1,313 @@
/*
* \brief Generic implementation parts of the signaling framework
* \author Norman Feske
* \date 2008-09-16
*/
/*
* Copyright (C) 2008-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#include <util/retry.h>
#include <base/env.h>
#include <base/signal.h>
#include <base/thread.h>
#include <base/sleep.h>
#include <base/trace/events.h>
#include <signal_source/client.h>
#include <util/volatile_object.h>
using namespace Genode;
enum { STACK_SIZE = 4*1024*sizeof(addr_t) };
class Signal_handler_thread : Thread<STACK_SIZE>, Lock
{
private:
/**
* Actual signal source
*
* Member must be constructed in the context of the signal handler
* thread because on some platforms (e.g., Fiasco.OC), the calling
* thread context is used for implementing the signal-source protocol.
*/
Lazy_volatile_object<Signal_source_client> _signal_source;
void entry()
{
_signal_source.construct(env()->pd_session()->alloc_signal_source());
unlock();
Signal_receiver::dispatch_signals(&(*_signal_source));
}
public:
/**
* Constructor
*/
Signal_handler_thread()
: Thread<STACK_SIZE>("signal handler"), Lock(Lock::LOCKED)
{
start();
/*
* Make sure the signal source was constructed before proceeding
* with the use of signals. Otherwise, signals may get lost until
* the construction finished.
*/
lock();
}
~Signal_handler_thread()
{
env()->pd_session()->free_signal_source(*_signal_source);
}
};
/*
* The signal-handler thread will be constructed before global constructors are
* called and, consequently, must not be a global static object. Otherwise, the
* Lazy_volatile_object constructor will be executed twice.
*/
static Lazy_volatile_object<Signal_handler_thread> & signal_handler_thread()
{
static Lazy_volatile_object<Signal_handler_thread> inst;
return inst;
}
namespace Genode {
/*
* Initialize the component-local signal-handling thread
*
* This function is called once at the startup of the component. It must
* be called before creating the first signal receiver.
*
* We allow this function to be overridden in order to enable core to omit the
* creation of the signal thread.
*/
void init_signal_thread() __attribute__((weak));
void init_signal_thread()
{
signal_handler_thread().construct();
}
void destroy_signal_thread()
{
signal_handler_thread().destruct();
}
}
/*****************************
** Signal context registry **
*****************************/
namespace Genode {
/**
* Facility to validate the liveliness of signal contexts
*
* After dissolving a 'Signal_context' from a 'Signal_receiver', a signal
* belonging to the context may still be in flight, i.e., currently processed
* within core or the kernel. Hence, after having received a signal, we
* need to manually check for the liveliness of the associated context.
* Because we cannot trust the signal imprint to represent a valid pointer,
* we need an associative data structure to validate the value. That is the
* role of the 'Signal_context_registry'.
*/
class Signal_context_registry
{
private:
/*
* Currently, the registry is just a linked list. If this becomes a
* scalability problem, we might introduce a more sophisticated
* associative data structure.
*/
Lock mutable _lock;
List<List_element<Signal_context> > _list;
public:
void insert(List_element<Signal_context> *le)
{
Lock::Guard guard(_lock);
_list.insert(le);
}
void remove(List_element<Signal_context> *le)
{
Lock::Guard guard(_lock);
_list.remove(le);
}
bool test_and_lock(Signal_context *context) const
{
Lock::Guard guard(_lock);
/* search list for context */
List_element<Signal_context> const *le = _list.first();
for ( ; le; le = le->next()) {
if (context == le->object()) {
/* lock object */
context->_lock.lock();
return true;
}
}
return false;
}
};
}
/**
* Return process-wide registry of registered signal contexts
*/
Genode::Signal_context_registry *signal_context_registry()
{
static Signal_context_registry inst;
return &inst;
}
/********************
** Signal context **
********************/
void Signal_context::submit(unsigned num)
{
if (!_receiver) {
PWRN("signal context with no receiver");
return;
}
if (!signal_context_registry()->test_and_lock(this)) {
PWRN("encountered dead signal context");
return;
}
/* construct and locally submit signal object */
Signal::Data signal(this, num);
_receiver->local_submit(signal);
/* free context lock that was taken by 'test_and_lock' */
_lock.unlock();
}
/*********************
** Signal receiver **
*********************/
Signal_receiver::Signal_receiver() { }
Signal_context_capability Signal_receiver::manage(Signal_context *context)
{
if (context->_receiver)
throw Context_already_in_use();
context->_receiver = this;
Lock::Guard list_lock_guard(_contexts_lock);
/* insert context into context list */
_contexts.insert(&context->_receiver_le);
/* register context at process-wide registry */
signal_context_registry()->insert(&context->_registry_le);
retry<Pd_session::Out_of_metadata>(
[&] () {
/* use signal context as imprint */
context->_cap = env()->pd_session()->alloc_context(_cap, (long)context);
},
[&] () {
size_t const quota = 1024*sizeof(long);
char buf[64];
snprintf(buf, sizeof(buf), "ram_quota=%zu", quota);
PINF("upgrading quota donation for PD session (%zu bytes)", quota);
env()->parent()->upgrade(env()->pd_session_cap(), buf);
}
);
return context->_cap;
}
void Signal_receiver::block_for_signal()
{
_signal_available.down();
}
void Signal_receiver::local_submit(Signal::Data ns)
{
Signal_context *context = ns.context;
/*
* Replace current signal of the context by signal with accumulated
* counters. In the common case, the current signal is an invalid
* signal with a counter value of zero.
*/
unsigned num = context->_curr_signal.num + ns.num;
context->_curr_signal = Signal::Data(context, num);
/* wake up the receiver if the context becomes pending */
if (!context->_pending) {
context->_pending = true;
_signal_available.up();
}
}
void Signal_receiver::dispatch_signals(Signal_source *signal_source)
{
for (;;) {
Signal_source::Signal source_signal = signal_source->wait_for_signal();
/* look up context as pointed to by the signal imprint */
Signal_context *context = (Signal_context *)(source_signal.imprint());
if (!context) {
PERR("received null signal imprint, stop signal handling");
sleep_forever();
}
if (!signal_context_registry()->test_and_lock(context)) {
PWRN("encountered dead signal context");
continue;
}
if (context->_receiver) {
/* construct and locally submit signal object */
Signal::Data signal(context, source_signal.num());
context->_receiver->local_submit(signal);
} else {
PWRN("signal context with no receiver");
}
/* free context lock that was taken by 'test_and_lock' */
context->_lock.unlock();
}
}
void Signal_receiver::_platform_begin_dissolve(Signal_context *) { }
void Signal_receiver::_platform_finish_dissolve(Signal_context * const c) {
signal_context_registry()->remove(&c->_registry_le); }
void Signal_receiver::_platform_destructor() { }

View File

@ -0,0 +1,235 @@
/*
* \brief Platform-independent part of signal framework
* \author Norman Feske
* \author Christian Prochaska
* \author Martin Stein
* \date 2013-02-21
*/
/*
* Copyright (C) 2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* Genode includes */
#include <base/env.h>
#include <base/signal.h>
#include <base/trace/events.h>
using namespace Genode;
/************
** Signal **
************/
Signal::Signal(Signal const &other)
{
_data.context = other._data.context;
_data.num = other._data.num;
_inc_ref();
}
Signal & Signal::operator=(Signal const &other)
{
if ((_data.context == other._data.context) &&
(_data.num == other._data.num))
return *this;
_dec_ref_and_unlock();
_data.context = other._data.context;
_data.num = other._data.num;
_inc_ref();
return *this;
}
Signal::~Signal() { _dec_ref_and_unlock(); }
void Signal::_dec_ref_and_unlock()
{
if (_data.context) {
Lock::Guard lock_guard(_data.context->_lock);
_data.context->_ref_cnt--;
if (_data.context->_ref_cnt == 0)
_data.context->_destroy_lock.unlock();
}
}
void Signal::_inc_ref()
{
if (_data.context) {
Lock::Guard lock_guard(_data.context->_lock);
_data.context->_ref_cnt++;
}
}
Signal::Signal(Signal::Data data) : _data(data)
{
if (_data.context) {
_data.context->_ref_cnt = 1;
_data.context->_destroy_lock.lock();
}
}
/********************
** Signal_context **
********************/
Signal_context::~Signal_context()
{
/*
* Detect the bug in an application where a signal context is destroyed
* prior to dissolving it from the signal receiver.
*/
if (_receiver)
PERR("Destructing undissolved signal context");
}
/************************
** Signal_transmitter **
************************/
Signal_transmitter::Signal_transmitter(Signal_context_capability context)
: _context(context) { }
void Signal_transmitter::context(Signal_context_capability context) {
_context = context; }
Signal_context_capability Signal_transmitter::context() { return _context; }
/*********************
** Signal_receiver **
*********************/
Signal Signal_receiver::wait_for_signal()
{
for (;;) {
/* block until the receiver has received a signal */
block_for_signal();
try {
return pending_signal();
} catch (Signal_not_pending) { }
}
}
Signal Signal_receiver::pending_signal()
{
Lock::Guard list_lock_guard(_contexts_lock);
/* look up a context with a pending signal */
for (List_element<Signal_context> *le = _contexts.first(); le; le = le->next()) {
Signal_context *context = le->object();
Lock::Guard lock_guard(context->_lock);
/* check if context has a pending signal */
if (!context->_pending)
continue;
context->_pending = false;
Signal::Data result = context->_curr_signal;
/* invalidate current signal in context */
context->_curr_signal = Signal::Data(0, 0);
if (result.num == 0)
PWRN("returning signal with num == 0");
Trace::Signal_received trace_event(*context, result.num);
/* return last received signal */
return result;
}
/*
* Normally, we should never arrive at this point because that would
* mean that the '_signal_available' semaphore was increased without
* registering the signal in any context associated to the receiver.
*
* However, if a context gets dissolved right after submitting a
* signal, we may have increased the semaphore already. In this case
* the signal-causing context is absent from the list.
*/
throw Signal_not_pending();
}
Signal_receiver::~Signal_receiver()
{
Lock::Guard list_lock_guard(_contexts_lock);
/* disassociate contexts from the receiver */
for (List_element<Signal_context> *le; (le = _contexts.first()); )
_unsynchronized_dissolve(le->object());
_platform_destructor();
}
void Signal_receiver::_unsynchronized_dissolve(Signal_context * const context)
{
_platform_begin_dissolve(context);
/* tell core to stop sending signals referring to the context */
env()->pd_session()->free_context(context->_cap);
/* restore default initialization of signal context */
context->_receiver = 0;
context->_cap = Signal_context_capability();
/* remove context from context list */
_contexts.remove(&context->_receiver_le);
_platform_finish_dissolve(context);
}
void Signal_receiver::dissolve(Signal_context *context)
{
if (context->_receiver != this)
throw Context_not_associated();
Lock::Guard list_lock_guard(_contexts_lock);
_unsynchronized_dissolve(context);
Lock::Guard context_destroy_lock_guard(context->_destroy_lock);
}
bool Signal_receiver::pending()
{
Lock::Guard list_lock_guard(_contexts_lock);
/* look up a context with a pending signal */
for (List_element<Signal_context> *le = _contexts.first(); le; le = le->next()) {
Signal_context *context = le->object();
Lock::Guard lock_guard(context->_lock);
if (context->_pending)
return true;
}
return false;
}
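/*
 * Illustrative sketch of the signal API implemented above: a context is
 * registered at a receiver, a signal is submitted via a transmitter, and
 * the receiver blocks until the signal arrives.
 *
 * ! Signal_receiver receiver;
 * ! Signal_context  context;
 * ! Signal_context_capability cap = receiver.manage(&context);
 * !
 * ! Signal_transmitter(cap).submit(1);
 * !
 * ! Signal signal = receiver.wait_for_signal();
 * !
 * ! receiver.dissolve(&context);
 */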

View File

@ -0,0 +1,31 @@
/*
* \brief Generic implementation parts of the signaling framework
* \author Norman Feske
* \author Alexander Boettcher
*/
/*
* Copyright (C) 2008-2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#include <base/env.h>
#include <base/trace/events.h>
using namespace Genode;
/************************
** Signal transmitter **
************************/
void Signal_transmitter::submit(unsigned cnt)
{
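/*
 * The nested scope bounds the lifetime of the trace-event object,
 * which records the submission when constructed.
 */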
{
Trace::Signal_submit trace_event(cnt);
}
env()->pd_session()->submit(_context, cnt);
}

View File

@ -0,0 +1,404 @@
/*
* \brief Slab allocator implementation
* \author Norman Feske
* \date 2006-05-16
*/
/*
* Copyright (C) 2006-2016 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#include <base/slab.h>
#include <util/construct_at.h>
#include <util/misc_math.h>
using namespace Genode;
/**
* A slab block holds an array of slab entries.
*/
class Genode::Slab::Block
{
public:
Block *next = this; /* next block in ring */
Block *prev = this; /* previous block in ring */
private:
enum { FREE, USED };
Slab &_slab; /* back reference to slab */
size_t _avail = _slab._entries_per_block; /* free entries of this block */
/*
* Each slab block consists of three areas, a fixed-size header
* that contains the member variables declared above, a byte array
* called state table that holds the allocation state for each slab
* entry, and an area holding the actual slab entries. The number
* of state-table elements corresponds to the maximum number of slab
* entries per slab block (the '_entries_per_block' member variable of
* the Slab allocator).
*/
char _data[]; /* dynamic data (state table and slab entries) */
/*
* Caution! no member variables allowed below this line!
*/
/**
* Return the allocation state of a slab entry
*/
inline bool _state(int idx) { return _data[idx]; }
/**
* Set the allocation state of a slab entry
*/
inline void _state(int idx, bool state) { _data[idx] = state; }
/**
* Request address of slab entry by its index
*/
Entry *_slab_entry(int idx);
/**
* Determine block index of specified slab entry
*/
int _slab_entry_idx(Entry *e);
public:
/**
* Constructor
*/
explicit Block(Slab &slab) : _slab(slab)
{
for (unsigned i = 0; i < _avail; i++)
_state(i, FREE);
}
/**
* Request number of available entries in block
*/
unsigned avail() const { return _avail; }
/**
* Allocate slab entry from block
*/
void *alloc();
/**
* Return a used slab block entry
*/
Entry *any_used_entry();
/**
* These functions are called by Slab::Entry.
*/
void inc_avail(Entry &e);
void dec_avail() { _avail--; }
};
struct Genode::Slab::Entry
{
Block &block;
char data[];
/*
* Caution! no member variables allowed below this line!
*/
explicit Entry(Block &block) : block(block)
{
block.dec_avail();
}
~Entry()
{
block.inc_avail(*this);
}
/**
* Lookup Entry by given address
*
* The specified address is supposed to point to _data[0].
*/
static Entry *slab_entry(void *addr) {
return (Entry *)((addr_t)addr - sizeof(Entry)); }
};
/****************
** Slab block **
****************/
Slab::Entry *Slab::Block::_slab_entry(int idx)
{
/*
* The slab entries start after the state array, which consists
* of '_entries_per_block' bytes. We align the first entry to a
* machine-word-aligned address.
*/
size_t const entry_size = sizeof(Entry) + _slab._slab_size;
return (Entry *)&_data[align_addr(_slab._entries_per_block, log2(sizeof(addr_t)))
+ entry_size*idx];
}
int Slab::Block::_slab_entry_idx(Slab::Entry *e)
{
size_t const entry_size = sizeof(Entry) + _slab._slab_size;
return ((addr_t)e - (addr_t)_slab_entry(0))/entry_size;
}
void *Slab::Block::alloc()
{
for (unsigned i = 0; i < _slab._entries_per_block; i++) {
if (_state(i) != FREE)
continue;
_state(i, USED);
Entry * const e = _slab_entry(i);
construct_at<Entry>(e, *this);
return e->data;
}
return nullptr;
}
Slab::Entry *Slab::Block::any_used_entry()
{
for (unsigned i = 0; i < _slab._entries_per_block; i++)
if (_state(i) == USED)
return _slab_entry(i);
return nullptr;
}
void Slab::Block::inc_avail(Entry &e)
{
/* mark slab entry as free */
_state(_slab_entry_idx(&e), FREE);
_avail++;
}
/**********
** Slab **
**********/
Slab::Slab(size_t slab_size, size_t block_size, void *initial_sb,
Allocator *backing_store)
:
_slab_size(slab_size),
_block_size(block_size),
/*
* Calculate number of entries per slab block.
*
* The 'sizeof(umword_t)' accounts for the alignment of the first slab
* entry. The '+ 1' accounts for the one-byte state entry per slab entry.
*/
_entries_per_block((_block_size - sizeof(Block) - sizeof(umword_t))
/ (_slab_size + sizeof(Entry) + 1)),
_initial_sb((Block *)initial_sb),
_nested(false),
_curr_sb((Block *)initial_sb),
_backing_store(backing_store)
{
/* if no initial slab block was specified, try to get one */
if (!_curr_sb && _backing_store)
_curr_sb = _new_slab_block();
if (!_curr_sb) {
PERR("failed to obtain initial slab block");
return;
}
/* init first slab block */
construct_at<Block>(_curr_sb, *this);
_total_avail = _entries_per_block;
_num_blocks = 1;
}
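/*
 * Worked example (illustrative, the sizeof values are assumptions): on a
 * 64-bit machine with a block size of 4096, a slab size of 32,
 * sizeof(Block) == 40, and sizeof(Entry) == 8, the formula yields
 *
 * ! (4096 - 40 - 8) / (32 + 8 + 1) = 98 entries per block
 */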
Slab::~Slab()
{
if (!_backing_store)
return;
/* free backing store */
while (_num_blocks > 1)
_free_curr_sb();
/* release last block */
_release_backing_store(_curr_sb);
}
Slab::Block *Slab::_new_slab_block()
{
void *sb = nullptr;
if (!_backing_store || !_backing_store->alloc(_block_size, &sb))
return nullptr;
return construct_at<Block>(sb, *this);
}
void Slab::_release_backing_store(Block *block)
{
if (block->avail() != _entries_per_block)
PWRN("freeing non-empty slab block");
_total_avail -= block->avail();
_num_blocks--;
/*
* Free only the slab blocks that we allocated dynamically. This is not the
* case for the '_initial_sb' that we got as constructor argument.
*/
if (_backing_store && (block != _initial_sb))
_backing_store->free(block, _block_size);
}
void Slab::_free_curr_sb()
{
/* block to free */
Block * const block = _curr_sb;
/* advance '_curr_sb' because the old pointer won't be valid any longer */
_curr_sb = _curr_sb->next;
/* never free the initial block */
if (_num_blocks <= 1)
return;
/* remove block from ring */
block->prev->next = block->next;
block->next->prev = block->prev;
_release_backing_store(block);
}
void Slab::_insert_sb(Block *sb)
{
sb->prev = _curr_sb;
sb->next = _curr_sb->next;
_curr_sb->next->prev = sb;
_curr_sb->next = sb;
_total_avail += _entries_per_block;
_num_blocks++;
}
void Slab::insert_sb(void *ptr)
{
_insert_sb(construct_at<Block>(ptr, *this));
}
bool Slab::alloc(size_t size, void **out_addr)
{
/*
* If we run out of slab, we need to allocate a new slab block. For the
* special case that this block is allocated using the allocator that by
* itself uses the slab allocator, such an allocation could cause up to
* three new slab-entry allocations. So we need to allocate the
* new slab block early enough - that is, when only three free slab
* entries are left.
*/
if (_backing_store && (_total_avail <= 3) && !_nested) {
/* allocate new block for slab */
_nested = true;
Block * const sb = _new_slab_block();
_nested = false;
if (!sb) return false;
/*
* The new block has the maximum number of available slots and
* so we can insert it at the beginning of the sorted block
* list.
*/
_insert_sb(sb);
}
/* skip completely occupied slab blocks, detect cycles */
Block const * const orig_curr_sb = _curr_sb;
for (; _curr_sb->avail() == 0; _curr_sb = _curr_sb->next)
if (_curr_sb->next == orig_curr_sb)
break;
*out_addr = _curr_sb->alloc();
if (*out_addr == nullptr)
return false;
_total_avail--;
return true;
}
void Slab::_free(void *addr)
{
Entry *e = addr ? Entry::slab_entry(addr) : nullptr;
if (!e)
return;
Block &block = e->block;
e->~Entry();
_total_avail++;
/*
* Release completely free slab blocks if the total number of free slab
* entries exceeds the capacity of two slab blocks. This way we keep
* a modest amount of available entries around so that thrashing effects
* are mitigated.
*/
_curr_sb = &block;
while (_total_avail > 2*_entries_per_block
&& _num_blocks > 1
&& _curr_sb->avail() == _entries_per_block) {
_free_curr_sb();
}
}
void *Slab::any_used_elem()
{
if (_total_avail == _num_blocks*_entries_per_block)
return nullptr;
/*
* We know that there exists at least one used element.
*/
/* skip completely free slab blocks */
for (; _curr_sb->avail() == _entries_per_block; _curr_sb = _curr_sb->next);
/* found a block with used elements - return address of the first one */
Entry *e = _curr_sb->any_used_entry();
return e ? e->data : nullptr;
}
size_t Slab::consumed() const { return _num_blocks*_block_size; }

View File

@ -0,0 +1,22 @@
/*
* \brief Lay back and relax
* \author Norman Feske
* \author Christian Helmuth
* \date 2006-07-19
*/
/*
* Copyright (C) 2006-2016 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#include <base/sleep.h>
#include <base/lock.h>
void Genode::sleep_forever()
{
Lock sleep;
while (true) sleep.lock();
}

View File

@ -0,0 +1,111 @@
/*
* \brief Heap that stores each block at a separate dataspace
* \author Norman Feske
* \date 2006-08-16
*/
/*
* Copyright (C) 2006-2016 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#include <util/construct_at.h>
#include <base/heap.h>
#include <base/printf.h>
using namespace Genode;
Sliced_heap::Sliced_heap(Ram_session &ram_session, Region_map &region_map)
:
_ram_session(ram_session), _region_map(region_map), _consumed(0)
{ }
Sliced_heap::~Sliced_heap()
{
for (Block *b; (b = _blocks.first()); ) {
/*
* Compute pointer to payload, which follows the meta-data header.
* Note the pointer arithmetics. By adding 1 to 'b', we end up with
* 'payload' pointing to the data portion of the block.
*/
void * const payload = b + 1;
free(payload, b->size);
}
}
bool Sliced_heap::alloc(size_t size, void **out_addr)
{
/* allocation includes space for block meta data and is page-aligned */
size = align_addr(size + sizeof(Block), 12);
Ram_dataspace_capability ds_cap;
Block *block = nullptr;
try {
ds_cap = _ram_session.alloc(size);
block = _region_map.attach(ds_cap);
} catch (Region_map::Attach_failed) {
PERR("Could not attach dataspace to local address space");
_ram_session.free(ds_cap);
return false;
} catch (Ram_session::Alloc_failed) {
PERR("Could not allocate dataspace with size %zu", size);
return false;
}
/* serialize access to block list */
Lock::Guard lock_guard(_lock);
construct_at<Block>(block, ds_cap, size);
_consumed += size;
_blocks.insert(block);
/* skip meta data prepended to the payload portion of the block */
*out_addr = block + 1;
return true;
}
void Sliced_heap::free(void *addr, size_t size)
{
Ram_dataspace_capability ds_cap;
void *local_addr = nullptr;
{
/* serialize access to block list */
Lock::Guard lock_guard(_lock);
/*
* The 'addr' argument points to the payload. We use pointer
* arithmetics to determine the pointer to the block's meta data that
* is prepended to the payload.
*/
Block * const block = reinterpret_cast<Block *>(addr) - 1;
_blocks.remove(block);
_consumed -= block->size;
ds_cap = block->ds;
local_addr = block;
/*
* Call destructor to properly destruct the dataspace capability
* member of the 'Block'.
*/
block->~Block();
}
_region_map.detach(local_addr);
_ram_session.free(ds_cap);
}
size_t Sliced_heap::overhead(size_t size) const
{
return align_addr(size + sizeof(Block), 12) - size;
}
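/*
 * Worked example (illustrative, sizeof(Block) is an assumption): with
 * sizeof(Block) == 32, a request of 100 bytes is expanded to
 * align_addr(100 + 32, 12) = 4096 bytes. Hence, 'overhead(100)' would
 * return 4096 - 100 = 3996.
 */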

View File

@ -0,0 +1,76 @@
/*
* \brief Stack-allocator implementation for the Genode Thread API
* \author Norman Feske
* \author Martin Stein
* \date 2014-01-26
*/
/*
* Copyright (C) 2010-2014 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* base-internal includes */
#include <base/internal/stack_allocator.h>
using namespace Genode;
Stack *Stack_allocator::base_to_stack(addr_t base)
{
addr_t result = base + stack_virtual_size()
- sizeof(Stack);
return reinterpret_cast<Stack *>(result);
}
addr_t Stack_allocator::addr_to_base(void *addr)
{
return ((addr_t)addr) & ~(stack_virtual_size() - 1);
}
size_t Stack_allocator::base_to_idx(addr_t base)
{
return (base - stack_area_virtual_base()) / stack_virtual_size();
}
addr_t Stack_allocator::idx_to_base(size_t idx)
{
return stack_area_virtual_base() + idx * stack_virtual_size();
}
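/*
 * Example of the address arithmetic (the base address and a virtual
 * stack size of 1 MiB are assumptions): for an address within the
 * fourth stack slot of a stack area starting at 0x40000000,
 *
 * ! addr_to_base(0x40372abc) = 0x40372abc & ~0xfffff = 0x40300000
 * ! base_to_idx(0x40300000) = (0x40300000 - 0x40000000) / 0x100000 = 3
 */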
Stack *
Stack_allocator::alloc(Thread_base *thread_base, bool main_thread)
{
if (main_thread)
/* the main-thread stack is the first one */
return base_to_stack(stack_area_virtual_base());
try {
Lock::Guard _lock_guard(_threads_lock);
return base_to_stack(idx_to_base(_alloc.alloc()));
} catch(Bit_allocator<MAX_THREADS>::Out_of_indices) {
return 0;
}
}
void Stack_allocator::free(Stack *stack)
{
addr_t const base = addr_to_base(stack);
Lock::Guard _lock_guard(_threads_lock);
_alloc.free(base_to_idx(base));
}
Stack_allocator &Stack_allocator::stack_allocator()
{
static Stack_allocator inst;
return inst;
}

View File

@ -0,0 +1,22 @@
/*
* \brief Component-local stack area
* \author Norman Feske
* \date 2010-01-19
*/
/*
* Copyright (C) 2010-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* base-internal includes */
#include <base/internal/platform_env_common.h>
#include <base/internal/stack_area.h>
namespace Genode {
Region_map *env_stack_area_region_map;
Ram_session *env_stack_area_ram_session;
}

View File

@ -0,0 +1,252 @@
/*
* \brief Implementation of the Thread API
* \author Norman Feske
* \date 2010-01-11
*/
/*
* Copyright (C) 2010-2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* Genode includes */
#include <util/construct_at.h>
#include <util/string.h>
#include <util/misc_math.h>
#include <base/thread.h>
#include <base/env.h>
#include <base/sleep.h>
#include <base/snprintf.h>
/* base-internal includes */
#include <base/internal/stack_allocator.h>
using namespace Genode;
/**
 * Region map and RAM session of the component's stack area
 *
 * These objects are provided by the process environment.
 */
namespace Genode {
extern Region_map * const env_stack_area_region_map;
extern Ram_session * const env_stack_area_ram_session;
}
void Stack::size(size_t const size)
{
/* check if the stack needs to be enhanced */
size_t const stack_size = (addr_t)_stack - _base;
if (stack_size >= size) { return; }
/* check if the stack enhancement fits the stack region */
enum {
UTCB_SIZE = sizeof(Native_utcb),
PAGE_SIZE_LOG2 = 12,
PAGE_SIZE = (1UL << PAGE_SIZE_LOG2),
};
addr_t const stack_slot_base = Stack_allocator::addr_to_base(this);
size_t const ds_size = align_addr(size - stack_size, PAGE_SIZE_LOG2);
if (_base - ds_size < stack_slot_base)
throw Thread_base::Stack_too_large();
/* allocate and attach backing store for the stack enhancement */
addr_t const ds_addr = _base - ds_size - stack_area_virtual_base();
try {
Ram_session * const ram = env_stack_area_ram_session;
Ram_dataspace_capability const ds_cap = ram->alloc(ds_size);
Region_map * const rm = env_stack_area_region_map;
void * const attach_addr = rm->attach_at(ds_cap, ds_addr, ds_size);
if (ds_addr != (addr_t)attach_addr)
throw Thread_base::Out_of_stack_space();
}
catch (Ram_session::Alloc_failed) {
throw Thread_base::Stack_alloc_failed();
}
/* update stack information */
_base -= ds_size;
}
Stack *
Thread_base::_alloc_stack(size_t stack_size, char const *name, bool main_thread)
{
/*
* Synchronize stack list when creating new threads from multiple threads
*
* XXX: remove interim fix
*/
static Lock alloc_lock;
Lock::Guard _lock_guard(alloc_lock);
/* allocate stack */
Stack *stack = Stack_allocator::stack_allocator().alloc(this, main_thread);
if (!stack)
throw Out_of_stack_space();
/* determine size of dataspace to allocate for the stack */
enum { PAGE_SIZE_LOG2 = 12 };
size_t ds_size = align_addr(stack_size, PAGE_SIZE_LOG2);
if (stack_size >= stack_virtual_size() -
sizeof(Native_utcb) - (1UL << PAGE_SIZE_LOG2))
throw Stack_too_large();
/*
* Calculate base address of the stack
*
* The stack pointer is always located at the top of the stack header.
*/
addr_t ds_addr = Stack_allocator::addr_to_base(stack) +
stack_virtual_size() - ds_size;
/* add padding for UTCB if defined for the platform */
if (sizeof(Native_utcb) >= (1 << PAGE_SIZE_LOG2))
ds_addr -= sizeof(Native_utcb);
/* allocate and attach backing store for the stack */
Ram_dataspace_capability ds_cap;
try {
ds_cap = env_stack_area_ram_session->alloc(ds_size);
addr_t attach_addr = ds_addr - stack_area_virtual_base();
if (attach_addr != (addr_t)env_stack_area_region_map->attach_at(ds_cap, attach_addr, ds_size))
throw Stack_alloc_failed();
}
catch (Ram_session::Alloc_failed) { throw Stack_alloc_failed(); }
/*
* Now the stack is backed by memory, so it is safe to access its members.
*
* We need to initialize the stack object's memory with zeroes, otherwise
* the 'ds_cap' member would contain junk instead of an invalid capability.
* That would cause trouble when the assignment operator of
* 'Native_capability' is used.
*/
construct_at<Stack>(stack, name, *this, ds_addr, ds_cap);
Abi::init_stack(stack->top());
return stack;
}
void Thread_base::_free_stack(Stack *stack)
{
addr_t ds_addr = stack->base() - stack_area_virtual_base();
Ram_dataspace_capability ds_cap = stack->ds_cap();
/* call the destructor explicitly before the memory gets detached */
stack->~Stack();
Genode::env_stack_area_region_map->detach((void *)ds_addr);
Genode::env_stack_area_ram_session->free(ds_cap);
/* stack ready for reuse */
Stack_allocator::stack_allocator().free(stack);
}
void Thread_base::name(char *dst, size_t dst_len)
{
snprintf(dst, dst_len, "%s", _stack->name().string());
}
void Thread_base::join() { _join_lock.lock(); }
void *Thread_base::alloc_secondary_stack(char const *name, size_t stack_size)
{
Stack *stack = _alloc_stack(stack_size, name, false);
return (void *)stack->top();
}
void Thread_base::free_secondary_stack(void* stack_addr)
{
addr_t base = Stack_allocator::addr_to_base(stack_addr);
_free_stack(Stack_allocator::base_to_stack(base));
}
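/*
 * Illustrative sketch: a component may allocate an additional stack,
 * e.g., for temporarily executing code with a larger stack demand, and
 * release it later ('thread' is a placeholder for a 'Thread_base'
 * pointer).
 *
 * ! void *top = thread->alloc_secondary_stack("helper", 16*1024);
 * ! ...
 * ! thread->free_secondary_stack(top);
 */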
Native_thread &Thread_base::native_thread() {
return _stack->native_thread(); }
void *Thread_base::stack_top() const { return (void *)_stack->top(); }
void *Thread_base::stack_base() const { return (void*)_stack->base(); }
void Thread_base::stack_size(size_t const size) { _stack->size(size); }
size_t Thread_base::stack_virtual_size()
{
return Genode::stack_virtual_size();
}
addr_t Thread_base::stack_area_virtual_base()
{
return Genode::stack_area_virtual_base();
}
size_t Thread_base::stack_area_virtual_size()
{
return Genode::stack_area_virtual_size();
}
Thread_base::Thread_base(size_t weight, const char *name, size_t stack_size,
Type type, Cpu_session *cpu_session, Affinity::Location affinity)
:
_cpu_session(cpu_session),
_affinity(affinity),
_trace_control(nullptr),
_stack(type == REINITIALIZED_MAIN ?
_stack : _alloc_stack(stack_size, name, type == MAIN)),
_join_lock(Lock::LOCKED)
{
_init_platform_thread(weight, type);
if (_cpu_session) {
Dataspace_capability ds = _cpu_session->trace_control();
if (ds.valid())
_trace_control = env()->rm_session()->attach(ds);
}
}
Thread_base::Thread_base(size_t weight, const char *name, size_t stack_size,
Type type, Affinity::Location affinity)
: Thread_base(weight, name, stack_size, type, nullptr, affinity) { }
Thread_base::~Thread_base()
{
if (Thread_base::myself() == this) {
PERR("thread '%s' tried to self de-struct - sleeping forever.",
_stack->name().string());
sleep_forever();
}
_deinit_platform_thread();
_free_stack(_stack);
/*
* We have to detach the trace-control dataspace last because
* we cannot invalidate the pointer used by the 'Trace::Logger'
* from here, and any following RPC call would stumble upon the
* detached trace-control dataspace.
*/
if (_trace_control)
env()->rm_session()->detach(_trace_control);
}

View File

@ -0,0 +1,18 @@
/*
* \brief Default thread bootstrap code
* \author Norman Feske
* \date 2009-04-02
*/
/*
* Copyright (C) 2009-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#include <base/thread.h>
using namespace Genode;
void Thread_base::_thread_bootstrap() { }

View File

@ -0,0 +1,38 @@
/*
* \brief Implementation of the Thread API (generic myself())
* \author Norman Feske
* \date 2015-04-28
*/
/*
* Copyright (C) 2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* Genode includes */
#include <base/thread.h>
/* base-internal includes */
#include <base/internal/stack_allocator.h>
#include <base/internal/stack_area.h>
Genode::Thread_base *Genode::Thread_base::myself()
{
int dummy = 0; /* used for determining the stack pointer */
/*
* If the stack pointer is outside the stack area, we assume that
* we are the main thread because this condition can never be met by any
* other thread.
*/
addr_t sp = (addr_t)(&dummy);
if (sp < Genode::stack_area_virtual_base() ||
sp >= Genode::stack_area_virtual_base() + Genode::stack_area_virtual_size())
return 0;
addr_t base = Stack_allocator::addr_to_base(&dummy);
return &Stack_allocator::base_to_stack(base)->thread();
}

View File

@ -0,0 +1,75 @@
/*
* \brief Implementation of the Thread API
* \author Norman Feske
* \date 2010-01-19
*/
/*
* Copyright (C) 2010-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* Genode includes */
#include <base/thread.h>
#include <base/printf.h>
#include <base/sleep.h>
#include <base/env.h>
/* base-internal includes */
#include <base/internal/stack.h>
using namespace Genode;
/**
* Entry point entered by new threads
*/
void Thread_base::_thread_start()
{
Thread_base::myself()->_thread_bootstrap();
Thread_base::myself()->entry();
Thread_base::myself()->_join_lock.unlock();
Genode::sleep_forever();
}
/*****************
** Thread base **
*****************/
void Thread_base::_deinit_platform_thread()
{
if (!_cpu_session)
_cpu_session = env()->cpu_session();
_cpu_session->kill_thread(_thread_cap);
}
void Thread_base::start()
{
/* if no CPU session is given, use the one from the environment */
if (!_cpu_session)
_cpu_session = env()->cpu_session();
/* create thread at core */
char buf[48];
name(buf, sizeof(buf));
enum { WEIGHT = Cpu_session::DEFAULT_WEIGHT };
addr_t const utcb = (addr_t)&_stack->utcb();
_thread_cap = _cpu_session->create_thread(env()->pd_session_cap(),
WEIGHT, buf, _affinity, utcb);
if (!_thread_cap.valid())
throw Cpu_session::Thread_creation_failed();
/* start execution at initial instruction pointer and stack pointer */
_cpu_session->start(_thread_cap, (addr_t)_thread_start, _stack->top());
}
void Thread_base::cancel_blocking()
{
_cpu_session->cancel_blocking(_thread_cap);
}
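/*
 * Illustrative sketch of the interplay implemented above: a custom thread
 * overrides 'entry' and is started via 'start'; 'join' blocks until the
 * entry function returns (constructor arguments abbreviated).
 *
 * ! struct My_thread : Thread_base
 * ! {
 * !     My_thread(...) : Thread_base(...) { }
 * !     void entry() { PLOG("hello"); }
 * ! };
 * !
 * ! My_thread t(...);
 * ! t.start();
 * ! t.join();
 */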

View File

@ -0,0 +1,220 @@
/*
* \brief Event-tracing support
* \author Norman Feske
* \date 2013-08-09
*/
/*
* Copyright (C) 2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* Genode includes */
#include <base/env.h>
#include <base/thread.h>
#include <base/trace/policy.h>
#include <dataspace/client.h>
#include <util/construct_at.h>
/* local includes */
#include <base/internal/trace_control.h>
using namespace Genode;
namespace Genode { bool inhibit_tracing = true; /* cleared by '_main' */ }
/*******************
** Trace::Logger **
*******************/
bool Trace::Logger::_evaluate_control()
{
/* check process-global and thread-specific tracing condition */
if (inhibit_tracing || !control || control->tracing_inhibited())
return false;
if (control->state_changed()) {
/* suppress tracing during initialization */
Control::Inhibit_guard guard(*control);
if (control->to_be_disabled()) {
/* unload policy */
if (policy_module) {
env()->rm_session()->detach(policy_module);
policy_module = 0;
}
/* unmap trace buffer */
if (buffer) {
env()->rm_session()->detach(buffer);
buffer = 0;
}
/* inhibit generation of trace events */
enabled = false;
control->acknowledge_disabled();
}
else if (control->to_be_enabled()) {
control->acknowledge_enabled();
enabled = true;
}
}
if (enabled && (policy_version != control->policy_version())) {
/* suppress tracing during policy change */
Control::Inhibit_guard guard(*control);
/* obtain policy */
Dataspace_capability policy_ds = cpu->trace_policy(thread_cap);
if (!policy_ds.valid()) {
PWRN("could not obtain trace policy");
control->error();
enabled = false;
return false;
}
try {
max_event_size = 0;
policy_module = 0;
policy_module = env()->rm_session()->attach(policy_ds);
/* relocate function pointers of policy callback table */
for (unsigned i = 0; i < sizeof(Trace::Policy_module)/sizeof(void *); i++) {
((addr_t *)policy_module)[i] += (addr_t)(policy_module);
}
max_event_size = policy_module->max_event_size();
} catch (...) { }
/* obtain buffer */
buffer = 0;
Dataspace_capability buffer_ds = cpu->trace_buffer(thread_cap);
if (!buffer_ds.valid()) {
PWRN("could not obtain trace buffer");
control->error();
enabled = false;
return false;
}
try {
buffer = env()->rm_session()->attach(buffer_ds);
buffer->init(Dataspace_client(buffer_ds).size());
} catch (...) { }
policy_version = control->policy_version();
}
return enabled && policy_module;
}
void Trace::Logger::log(char const *msg, size_t len)
{
if (!this || !_evaluate_control()) return;
memcpy(buffer->reserve(len), msg, len);
buffer->commit(len);
}
void Trace::Logger::init(Thread_capability thread, Cpu_session *cpu_session,
Trace::Control *attached_control)
{
if (!attached_control)
return;
thread_cap = thread;
cpu = cpu_session;
unsigned const index = cpu->trace_control_index(thread);
Dataspace_capability ds = cpu->trace_control();
size_t size = Dataspace_client(ds).size();
if ((index + 1)*sizeof(Trace::Control) > size) {
PERR("thread control index is out of range");
return;
}
control = attached_control + index;
}
Trace::Logger::Logger()
:
cpu(nullptr),
control(nullptr),
enabled(false),
policy_version(0),
policy_module(0),
max_event_size(0),
pending_init(false)
{ }
/*****************
** Thread_base **
*****************/
/**
 * Return logger instance for the main thread
 */
static Trace::Logger *main_trace_logger()
{
static Trace::Logger logger;
return &logger;
}
static Trace::Control *main_trace_control;
Trace::Logger *Thread_base::_logger()
{
if (inhibit_tracing)
return 0;
Thread_base * const myself = Thread_base::myself();
Trace::Logger * const logger = myself ? &myself->_trace_logger
: main_trace_logger();
/* logger is already being initialized */
if (logger->is_init_pending())
return logger;
/* lazily initialize trace object */
if (!logger->is_initialized()) {
logger->init_pending(true);
Thread_capability thread_cap = myself ? myself->_thread_cap
: env()->parent()->main_thread_cap();
Genode::Cpu_session *cpu = myself ? myself->_cpu_session
: env()->cpu_session();
if (!cpu) cpu = env()->cpu_session();
if (!myself && !main_trace_control) {
Dataspace_capability ds = env()->cpu_session()->trace_control();
if (ds.valid())
main_trace_control = env()->rm_session()->attach(ds);
}
logger->init(thread_cap, cpu,
myself ? myself->_trace_control : main_trace_control);
}
return logger;
}
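/*
 * Usage sketch, assuming the public 'Thread_base::trace' helper declared
 * in 'base/thread.h': it boils down to '_logger()->log(...)', where a null
 * result of '_logger()' is tolerated by the null-this check in
 * 'Trace::Logger::log'. While tracing is inhibited, the call is a no-op.
 */
void example_trace_point()
{
	Genode::Thread_base::trace("example event");
}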


@ -0,0 +1,22 @@
/*
* \brief Accessor to user-level thread-control block (UTCB)
* \author Norman Feske
* \date 2016-01-23
*/
/*
* Copyright (C) 2016 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* Genode includes */
#include <base/thread.h>
/* base-internal includes */
#include <base/internal/stack.h>
#include <base/internal/native_utcb.h>
Genode::Native_utcb *Genode::Thread_base::utcb() { return &_stack->utcb(); }


@ -0,0 +1,66 @@
/*
* \brief Support for C++ exceptions (libsupc++)
* \author Norman Feske
* \author Sebastian Sumpf
* \date 2006-07-21
*/
/*
* Copyright (C) 2006-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* libsupc++ includes */
#include <cxxabi.h>
/* Genode includes */
#include <base/printf.h>
extern "C" char __eh_frame_start__[]; /* from linker script */
extern "C" void __register_frame (const void *begin); /* from libgcc_eh */
/*
* This symbol is overwritten by Genode's dynamic linker (ld.lib.so).
* After setup, the symbol will point to the actual implementation of
* 'dl_iterate_phdr', which is located within the linker. 'dl_iterate_phdr'
* iterates through all (linker-loaded) binaries and shared libraries. This
* function has to be implemented in order to support C++ exceptions within
* shared libraries.
* Return values of dl_iterate_phdr (gcc 4.2.4):
* < 0 = error
* 0 = continue program header iteration
* > 0 = stop iteration (no errors occurred)
*
* See also: man dl_iterate_phdr
*/
extern "C" int dl_iterate_phdr(int (*callback) (void *info, unsigned long size, void *data), void *data) __attribute__((weak));
extern "C" int dl_iterate_phdr(int (*callback) (void *info, unsigned long size, void *data), void *data) {
return -1; }
/*
* Terminate handler
*/
void terminate_handler()
{
std::type_info *t = __cxxabiv1::__cxa_current_exception_type();
if (t)
PERR("Uncaught exception of type '%s' (use 'c++filt -t' to demangle)",
t->name());
}
/*
* Initialization
*/
void init_exception_handling()
{
__register_frame(__eh_frame_start__);
std::set_terminate(terminate_handler);
}
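/*
 * Illustration with a hypothetical exception type: once
 * 'init_exception_handling' ran, an exception that no frame catches makes
 * the C++ runtime call 'std::terminate', which enters 'terminate_handler'
 * above and prints the mangled name of the exception type.
 */
struct Unhandled_error { };

void example_faulting_path()
{
	throw Unhandled_error();  /* no matching catch clause up the stack */
}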


@ -0,0 +1,72 @@
/*
* \brief Support for guarded variables
* \author Christian Helmuth
* \date 2009-11-18
*/
/*
* Copyright (C) 2009-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#include <cpu/atomic.h>
namespace __cxxabiv1
{
/*
* A guarded variable can be in three states:
*
* 1) not initialized
* 2) in initialization (transient)
* 3) initialized
*
* The generic ABI uses the first byte of a 64-bit guard variable for state
* 1), 2) and 3). ARM-EABI uses the first byte of a 32-bit guard variable.
* Therefore we define '__guard' as a 32-bit type and use the least
* significant byte for 1) and 3) and the following byte for 2) and let the
* other threads spin until the guard is released by the thread in
* initialization.
*/
typedef int __guard;
extern "C" int __cxa_guard_acquire(__guard *guard)
{
volatile char *initialized = (char *)guard;
volatile int *in_init = (int *)guard;
/* check for state 3) */
if (*initialized) return 0;
/* atomically check and set state 2) */
if (!Genode::cmpxchg(in_init, 0, 0x100)) {
/* spin until state 3) is reached if other thread is in init */
while (!*initialized) ;
/* guard not acquired */
return 0;
}
/*
* The guard was acquired. The caller is allowed to initialize the
* guarded variable and required to call __cxa_guard_release() to flag
* initialization completion (and unblock other guard applicants).
*/
return 1;
}
extern "C" void __cxa_guard_release(__guard *guard)
{
volatile char *initialized = (char *)guard;
/* set state 3) */
*initialized = 1;
}
extern "C" void __cxa_guard_abort(__guard *) { }
}
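/*
 * Sketch, simplified and not literal compiler output: code a compiler may
 * emit for a guarded function-local static, expressed in terms of the
 * '__cxa_guard_*' functions above. 'Object' is a stand-in type, and the
 * placement new requires '<new>'.
 */
#include <new>

struct Object { Object() { /* one-time initialization */ } };

Object &guarded_singleton()
{
	static __cxxabiv1::__guard guard;  /* zero-initialized: state 1) */
	static char storage[sizeof(Object)] __attribute__((aligned(__alignof__(Object))));

	/* the first caller wins the guard and constructs, late-comers skip */
	if (__cxxabiv1::__cxa_guard_acquire(&guard)) {
		new (storage) Object();
		__cxxabiv1::__cxa_guard_release(&guard);
	}
	return *reinterpret_cast<Object *>(storage);
}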


@ -0,0 +1,121 @@
/*
* \brief Simplistic malloc and free implementation
* \author Norman Feske
* \date 2006-07-21
*
* Malloc and free are required by the C++ exception handling.
* Therefore, we provide it here.
*/
/*
* Copyright (C) 2006-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#include <base/env.h>
#include <base/heap.h>
#include <util/string.h>
using namespace Genode;
/**
* Return heap partition for private use within the cxx library.
*
* If we used the 'env()->heap()' with the C++ runtime, we would run into a
* deadlock when a 'Ram_session::Alloc_failed' exception is thrown from within
* 'Heap::alloc'. For creating the exception object, the C++ runtime calls
* '__cxa_allocate_exception', which, in turn, calls 'malloc'. If our 'malloc'
* implementation called 'env()->heap()->alloc()', we would end up in a
* recursive attempt to obtain the 'env()->heap()' lock.
*
* By using a dedicated heap instance for the cxx library, we avoid this
* circular condition.
*/
static Heap *cxx_heap()
{
/*
* Exception frames are small. Hence, a small static backing store suffices
* for the cxx heap partition in the normal case. The 'env()->ram_session'
* is used only if the demand exceeds the capacity of the 'initial_block'.
*/
static char initial_block[1024*sizeof(long)];
static Heap heap(env()->ram_session(), env()->rm_session(),
Heap::UNLIMITED, initial_block, sizeof(initial_block));
return &heap;
}
typedef unsigned long Block_header;
extern "C" void *malloc(unsigned size)
{
/* enforce size to be a multiple of 4 bytes */
size = (size + 3) & ~3;
/*
* We store the size of the allocation at the very
* beginning of the allocated block and return
* the subsequent address. This way, we can retrieve
* the size information when freeing the block.
*/
unsigned long real_size = size + sizeof(Block_header);
void *addr = 0;
if (!cxx_heap()->alloc(real_size, &addr))
return 0;
*(Block_header *)addr = real_size;
return (Block_header *)addr + 1;
}
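/*
 * Consistency sketch, illustration only: the size header lives directly in
 * front of the pointer handed to the caller, which is what 'free' below
 * relies on to recover the block size.
 */
extern "C" void free(void *);  /* defined below */

void example_header_layout()
{
	void *p = malloc(13);  /* payload rounded up to 16 bytes */
	if (p) {
		Block_header const stored = *((Block_header *)p - 1);
		/* stored == 16 + sizeof(Block_header) */
		(void)stored;
		free(p);
	}
}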
extern "C" void *calloc(unsigned nmemb, unsigned size)
{
void *addr = malloc(nmemb*size);
/* malloc may fail, hence guard the memset */
if (addr) Genode::memset(addr, 0, nmemb*size);
return addr;
}
extern "C" void free(void *ptr)
{
if (!ptr) return;
unsigned long *addr = ((unsigned long *)ptr) - 1;
cxx_heap()->free(addr, *addr);
}
extern "C" void *realloc(void *ptr, Genode::size_t size)
{
if (!ptr)
return malloc(size);
if (!size) {
free(ptr);
return 0;
}
/* determine size of old block content (without header) */
unsigned long old_size = *((Block_header *)ptr - 1)
- sizeof(Block_header);
/* do not reallocate if new size is less than the current size */
if (size <= old_size)
return ptr;
/* allocate new block */
void *new_addr = malloc(size);
/* copy content from old block into new block */
if (new_addr)
memcpy(new_addr, ptr, Genode::min(old_size, (unsigned long)size));
/* free old block */
free(ptr);
return new_addr;
}


@ -0,0 +1,229 @@
/*
* \brief Dummy functions to make the damn thing link
* \author Norman Feske
* \date 2006-04-07
*/
/*
* Copyright (C) 2006-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#include <base/env.h>
#include <base/printf.h>
#include <base/stdint.h>
#include <base/sleep.h>
#include <base/thread.h>
#include <util/string.h>
using namespace Genode;
extern "C" void __cxa_pure_virtual()
{
PWRN("cxa pure virtual function called, return addr is %p",
__builtin_return_address(0));
}
extern "C" void __pure_virtual()
{
PWRN("pure virtual function called");
}
/**
* Prototype for exit-handler support function provided by '_main.cc'
*/
extern int genode___cxa_atexit(void(*func)(void*), void *arg,
void *dso_handle);
extern "C" int __cxa_atexit(void(*func)(void*), void *arg,
void *dso_handle)
{
return genode___cxa_atexit(func, arg, dso_handle);
}
/**
* Prototype for finalize support function provided by '_main.cc'
*/
extern int genode___cxa_finalize(void *dso);
extern "C" void __cxa_finalize(void *dso)
{
genode___cxa_finalize(dso);
}
/***********************************
** Support required for ARM EABI **
***********************************/
extern "C" int __aeabi_atexit(void *arg, void(*func)(void*),
void *dso_handle)
{
return genode___cxa_atexit(func, arg, dso_handle);
}
extern "C" __attribute__((weak))
void *__tls_get_addr()
{
static long dummy_tls;
return &dummy_tls;
}
/**
* Not needed for exception-handling init on ARM EABI,
* but called from 'init_exception'
*/
extern "C" __attribute__((weak))
void __register_frame(void *) { }
/**
* Needed to build for OKL4-gta01 with ARM EABI
*/
extern "C" __attribute__((weak)) void raise()
{
PDBG("raise called - not implemented\n");
}
/***************************
** Support for libsupc++ **
***************************/
extern "C" void *abort(void)
{
Genode::Thread_base * myself = Genode::Thread_base::myself();
char thread_name[64] = { "unknown" };
if (myself)
myself->name(thread_name, sizeof(thread_name));
PWRN("abort called - thread: '%s'", thread_name);
/* Notify the parent of failure */
if (!strcmp("main", thread_name, sizeof(thread_name)))
env()->parent()->exit(1);
sleep_forever();
return 0;
}
extern "C" void *fputc(void) {
return 0;
}
extern "C" void *fputs(const char *s, void *) {
PWRN("C++ runtime: %s", s);
return 0;
}
extern "C" void *fwrite(void) {
return 0;
}
extern "C" int memcmp(const void *p0, const void *p1, size_t size)
{
return Genode::memcmp(p0, p1, size);
}
extern "C" __attribute__((weak))
void *memcpy(void *dst, void *src, size_t n)
{
// PDBG("dst=%p, src=%p, n=%d", dst, src, n);
return Genode::memcpy(dst, src, n);
}
extern "C" __attribute__((weak))
void *memmove(void *dst, void *src, size_t n)
{
// PDBG("dst=%p, src=%p, n=%d", dst, src, n);
return Genode::memmove(dst, src, n);
}
extern "C" __attribute__((weak))
void *memset(void *s, int c, size_t n)
{
// PDBG("s=%p, c=%d, n=%d", s, c, n);
return Genode::memset(s, c, n);
}
extern "C" void *stderr(void) {
PWRN("stderr - not yet implemented");
return 0;
}
/*
* Used when having compiled libsupc++ with the FreeBSD libc
*/
struct FILE;
FILE *__stderrp;
extern "C" void *strcat(void)
{
PWRN("strcat - not yet implemented");
return 0;
}
extern "C" int strncmp(const char *s1, const char *s2, size_t n)
{
return Genode::strcmp(s1, s2, n);
}
extern "C" char *strcpy(char *dest, const char *src)
{
return Genode::strncpy(dest, src, ~0UL);
}
extern "C" size_t strlen(const char *s)
{
return Genode::strlen(s);
}
extern "C" int strcmp(const char *s1, const char *s2)
{
return Genode::strcmp(s1, s2);
}
/*
* Needed by ARM EABI (gcc-4.4 Codesourcery release1039)
*/
extern "C" int sprintf(char *str, const char *format, ...)
{
PWRN("sprintf - not implemented");
return 0;
}
/**********************************
** Support for stack protection **
**********************************/
extern "C" __attribute__((weak)) void __stack_chk_fail_local(void)
{
PERR("Violated stack boundary");
}


@ -0,0 +1,74 @@
/*
* \brief New and delete are special
* \author Norman Feske
* \date 2006-04-07
*/
/*
* Copyright (C) 2006-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#include <base/printf.h>
#include <base/allocator.h>
#include <base/sleep.h>
using Genode::size_t;
using Genode::Allocator;
using Genode::Deallocator;
using Genode::sleep_forever;
static void *try_alloc(Allocator *alloc, size_t size)
{
if (!alloc)
throw Allocator::Out_of_memory();
return alloc->alloc(size);
}
void *operator new (size_t s, Allocator *a) { return try_alloc(a, s); }
void *operator new [] (size_t s, Allocator *a) { return try_alloc(a, s); }
void *operator new (size_t s, Allocator &a) { return a.alloc(s); }
void *operator new [] (size_t s, Allocator &a) { return a.alloc(s); }
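/*
 * Usage sketch with a hypothetical 'My_object' type: the operators above
 * place an object into an arbitrary Genode allocator. Destruction
 * typically goes through Genode's 'destroy' helper because a plain
 * 'delete' lacks the size information the allocator may need.
 */
struct My_object { };

My_object *example_alloc(Genode::Allocator &alloc)
{
	return new (alloc) My_object();
}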
static void try_dealloc(void *ptr, Deallocator &dealloc)
{
/*
* Log error and block on the attempt to use an allocator that relies on
* the size argument.
*/
if (dealloc.need_size_for_free()) {
PERR("C++ runtime: delete called with allocator, which needs "
"'size' on free. Blocking before leaking memory...");
sleep_forever();
}
/* size not required, so call with dummy size */
dealloc.free(ptr, 0);
}
void operator delete (void *ptr, Deallocator *dealloc) { try_dealloc(ptr, *dealloc); }
void operator delete (void *ptr, Deallocator &dealloc) { try_dealloc(ptr, dealloc); }
/*
* The 'delete (void *)' operator gets referenced by compiler generated code,
* so it must be publicly defined in the 'cxx' library. These compiler
* generated calls seem to get executed only subsequently to explicit
* 'delete (void *)' calls in application code, which are not supported by the
* 'cxx' library, so the 'delete (void *)' implementation in the 'cxx' library
* does not have to do anything. Applications should use the 'delete (void *)'
* implementation of the 'stdcxx' library instead. To make this possible, the
* 'delete (void *)' implementation in the 'cxx' library must be 'weak'.
*/
__attribute__((weak)) void operator delete (void *)
{
PERR("cxx: operator delete (void *) called - not implemented. "
"A working implementation is available in the 'stdcxx' library.");
}


@ -0,0 +1,48 @@
/*
* \brief Wrapper for symbols required by libgcc_eh's exception handling
* \author Sebastian Sumpf
* \date 2015-09-04
*
* The wrapper always calls a function with prefix '_cxx'. In 'cxx.mk' we prefix
* the wrapped functions with '_cxx'. This whole procedure became necessary
* since the wrapped symbols are marked 'GLOBAL', 'HIDDEN' in libgcc_eh.a and
* thus libgcc_eh had to be linked to *all* binaries. In shared libraries this
* became unfeasible since libgcc_eh uses global data which might not be
* initialized during cross-library interaction. The clean way to go would be
* to link libgcc_s.so to DSOs and dynamic binaries, but unfortunately libgcc_s
* requires libc6 in the current Genode tool chain.
*/
/*
* Copyright (C) 2011-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* Unwind function found in all binaries */
void _cxx__Unwind_Resume(void *exc) __attribute__((weak));
void _Unwind_Resume(void *exc) {
_cxx__Unwind_Resume(exc); }
void _cxx__Unwind_DeleteException(void *exc) __attribute__((weak));
void _Unwind_DeleteException(void *exc) {
_cxx__Unwind_DeleteException(exc); }
/* Special ARM-EABI personality functions */
#ifdef __ARM_EABI__
int _cxx___aeabi_unwind_cpp_pr0(int state, void *block, void *context) __attribute__((weak));
int __aeabi_unwind_cpp_pr0(int state, void *block, void *context) {
return _cxx___aeabi_unwind_cpp_pr0(state, block, context); }
int _cxx___aeabi_unwind_cpp_pr1(int state, void *block, void *context) __attribute__((weak));
int __aeabi_unwind_cpp_pr1(int state, void *block, void *context) {
return _cxx___aeabi_unwind_cpp_pr1(state, block, context); }
/* Unwind functions found in some binaries */
void _cxx__Unwind_Complete(void *exc) __attribute__((weak));
void _Unwind_Complete(void *exc) {
_cxx__Unwind_Complete(exc); }
#endif /* __ARM_EABI__ */